1 //
   2 // Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4 //
   5 // This code is free software; you can redistribute it and/or modify it
   6 // under the terms of the GNU General Public License version 2 only, as
   7 // published by the Free Software Foundation.
   8 //
   9 // This code is distributed in the hope that it will be useful, but WITHOUT
  10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12 // version 2 for more details (a copy is included in the LICENSE file that
  13 // accompanied this code).
  14 //
  15 // You should have received a copy of the GNU General Public License version
  16 // 2 along with this work; if not, write to the Free Software Foundation,
  17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18 //
  19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20 // or visit www.oracle.com if you need additional information or have any
  21 // questions.
  22 //
  23 
  24 // ARM Architecture Description File
  25 
  26 //----------DEFINITION BLOCK---------------------------------------------------
  27 // Define name --> value mappings to inform the ADLC of an integer valued name
  28 // Current support includes integer values in the range [0, 0x7FFFFFFF]
  29 // Format:
  30 //        int_def  <name>         ( <int_value>, <expression>);
  31 // Generated Code in ad_<arch>.hpp
  32 //        #define  <name>   (<expression>)
  33 //        // value == <int_value>
  34 // Generated code in ad_<arch>.cpp adlc_verification()
  35 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
  36 //
definitions %{
// The default cost (of an ALU instruction).
  int_def DEFAULT_COST      (    100,     100);
// Very large cost — presumably used to make an instruction alternative
// effectively never selected by the matcher; confirm against ADLC usage.
  int_def HUGE_COST         (1000000, 1000000);

// Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

// Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  int_def CALL_COST         (    300, DEFAULT_COST * 3);
%}
  49 
  50 
  51 //----------SOURCE BLOCK-------------------------------------------------------
  52 // This is a block of C++ code which provides values, functions, and
  53 // definitions necessary in the rest of the architecture description
  54 source_hpp %{
  55 // Header information of the source block.
  56 // Method declarations/definitions which are used outside
  57 // the ad-scope can conveniently be defined here.
  58 //
  59 // To keep related declarations/definitions/uses close together,
  60 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
  61 
  62 // Does destination need to be loaded in a register then passed to a
  63 // branch instruction?
  64 extern bool maybe_far_call(const CallNode *n);
  65 extern bool maybe_far_call(const MachCallNode *n);
// Thin wrapper over MacroAssembler::_cache_fully_reachable(); used together
// with maybe_far_call() (above) to decide whether a call target needs to be
// loaded into a register first.
static inline bool cache_reachable() {
  return MacroAssembler::_cache_fully_reachable();
}
  69 
  70 #define ldr_32 ldr
  71 #define str_32 str
  72 #define tst_32 tst
  73 #define teq_32 teq
  74 #if 1
  75 extern bool PrintOptoAssembly;
  76 #endif
  77 
// Helper describing where C2-compiled code returns a value for a given
// ideal register kind (implementation lives outside this source block).
class c2 {
public:
  static OptoRegPair return_value(int ideal_reg);
};
  82 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::Shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // This platform does not emit call trampolines, hence zero.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 100 
// Emitters and size bounds for the exception and deoptimization handlers.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // 3 * 4: presumably three 4-byte instructions — keep in sync with
  // emit_exception_handler().
  static uint size_exception_handler() {
    return ( 3 * 4 );
  }


  // 9 * 4: presumably nine 4-byte instructions — keep in sync with
  // emit_deopt_handler().
  static uint size_deopt_handler() {
    return ( 9 * 4 );
  }

};
 118 
 119 %}
 120 
 121 source %{
 122 #define __ _masm.
 123 
 124 static FloatRegister reg_to_FloatRegister_object(int register_encoding);
 125 static Register reg_to_register_object(int register_encoding);
 126 
 127 
 128 // ****************************************************************************
 129 
 130 // REQUIRED FUNCTIONALITY
 131 
// Indicate if the safepoint node needs the polling page as an input.
// Since ARM does not have absolute addressing, it does (the polling page
// address must be materialized in a register).
bool SafePointNode::needs_polling_address_input() {
  return true;
}
 137 
// emit an interrupt that is caught by the debugger (for debugging compiler)
// Emits a single breakpoint instruction into cbuf.
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ breakpoint();
}
 143 
#ifndef PRODUCT
// Debug-only listing; "TA" is the mnemonic printed for a breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("TA");
}
#endif
 149 
// Emit a breakpoint instruction (see emit_break above).
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  emit_break(cbuf);
}
 153 
// Delegates to the generic MachNode size computation.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 157 
 158 
// Emit a single NOP instruction into cbuf.
void emit_nop(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ nop();
}
 163 
 164 
// Emit a call (plain or patchable) to the target of MachCallNode n with the
// given relocation. Debug asserts cross-check the node's precomputed
// return-address offset against what is actually emitted.
void emit_call_reloc(CodeBuffer &cbuf, const MachCallNode *n, MachOper *m, RelocationHolder const& rspec) {
  // Expected return-address offset, precomputed by the node.
  int ret_addr_offset0 = n->as_MachCall()->ret_addr_offset();
  int call_site_offset = cbuf.insts()->mark_off();
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark(); // needed in emit_to_interp_stub() to locate the call
  address target = (address)m->method();
  assert(n->as_MachCall()->entry_point() == target, "sanity");
  assert(maybe_far_call(n) == !__ reachable_from_cache(target), "sanity");
  assert(cache_reachable() == __ cache_fully_reachable(), "sanity");

  assert(target != NULL, "need real address");

  int ret_addr_offset = -1;
  if (rspec.type() == relocInfo::runtime_call_type) {
    __ call(target, rspec);
    ret_addr_offset = __ offset();
  } else {
    // scratches Rtemp
    ret_addr_offset = __ patchable_call(target, rspec, true);
  }
  // Emitted distance must match what ret_addr_offset() promised.
  assert(ret_addr_offset - call_site_offset == ret_addr_offset0, "fix ret_addr_offset()");
}
 187 
 188 //=============================================================================
 189 // REQUIRED FUNCTIONALITY for encoding
// Intentionally empty: no lo/hi half-word encoding is required here
// (see "REQUIRED FUNCTIONALITY for encoding" above).
void emit_lo(CodeBuffer &cbuf, int val) {  }
void emit_hi(CodeBuffer &cbuf, int val) {  }
 192 
 193 
 194 //=============================================================================
 195 const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();
 196 
int Compile::ConstantTable::calculate_table_base_offset() const {
  // Bias the base into the middle of the table so both halves stay within
  // the short-range FP load offsets listed below.
  int offset = -(size() / 2);
  // flds, fldd: 8-bit  offset multiplied by 4: +/- 1024
  // ldr, ldrb : 12-bit offset:                 +/- 4096
  if (!Assembler::is_simm10(offset)) {
    // Clamp to the most negative simm10 if the table is too large.
    offset = Assembler::min_simm10();
  }
  return offset;
}
 206 
// No post-register-allocation expansion on this platform; the expand hook
// must therefore never be called.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
 211 
// Load the address of the constant table base into the register the
// allocator assigned to this node.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  // Destination register chosen by the register allocator.
  Register r = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be: %d == %d", constant_table.size(), consts_size);

  // Materialize the constant table base.
  // Note: table_base_offset() is negative (see calculate_table_base_offset),
  // hence the "+ -" to move forward from the section start.
  address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
  RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
  __ mov_address(r, baseaddr, rspec);
}
 227 
uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
  return 8; // presumably two 4-byte instructions for mov_address — confirm
}
 231 
#ifndef PRODUCT
// Debug-only listing of the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  char reg[128];
  ra_->dump_register(this, reg);
  st->print("MOV_SLOW    &constanttable,%s\t! constant table base", reg);
}
#endif
 239 
#ifndef PRODUCT
// Debug-only listing of the method prologue; must mirror the sequence
// produced by MachPrologNode::emit below.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  for (int i = 0; i < OptoPrologueNops; i++) {
    st->print_cr("NOP"); st->print("\t");
  }

  size_t framesize = C->frame_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  int bangsize = C->bang_size_in_bytes();
  // Remove two words for return addr and rbp,
  framesize -= 2*wordSize;
  bangsize -= 2*wordSize;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("! stack bang (%d bytes)", bangsize); st->print("\t");
  }
  st->print_cr("PUSH   R_FP|R_LR_LR"); st->print("\t");
  if (framesize != 0) {
    st->print   ("SUB    R_SP, R_SP, " SIZE_FORMAT,framesize);
  }
}
#endif
 269 
// Emit the method prologue: optional nops, stack-bang check, push of FP/LR,
// frame allocation, and constant-table base-offset setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  for (int i = 0; i < OptoPrologueNops; i++) {
    __ nop();
  }

  size_t framesize = C->frame_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  int bangsize = C->bang_size_in_bytes();
  // Remove two words for return addr and fp,
  framesize -= 2*wordSize;
  bangsize -= 2*wordSize;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    __ arm_stack_overflow_check(bangsize, Rtemp);
  }

  __ raw_push(FP, LR);
  if (framesize != 0) {
    __ sub_slow(SP, SP, framesize);
  }

  // offset from scratch buffer is not valid
  if (strcmp(cbuf.name(), "Compile::Fill_buffer") == 0) {
    C->set_frame_complete( __ offset() );
  }

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 311 
// Delegates to the generic MachNode size computation.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 315 
// Upper bound on the relocation entries the prologue may need.
int MachPrologNode::reloc() const {
  return 10; // a large enough number
}
 319 
 320 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of the method epilogue; must mirror the sequence
// produced by MachEpilogNode::emit below.
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  size_t framesize = C->frame_size_in_bytes();
  framesize -= 2*wordSize;

  if (framesize != 0) {
    st->print("ADD    R_SP, R_SP, " SIZE_FORMAT "\n\t",framesize);
  }
  st->print("POP    R_FP|R_LR_LR");

  if (do_polling() && ra_->C->is_method_compilation()) {
    st->print("\n\t");
    st->print("MOV    Rtemp, #PollAddr\t! Load Polling address\n\t");
    st->print("LDR    Rtemp,[Rtemp]\t!Poll for Safepointing");
  }
}
#endif
 340 
// Emit the method epilogue: free the frame, pop FP/LR, and (for method
// compilations that poll) the return safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Compile* C = ra_->C;

  size_t framesize = C->frame_size_in_bytes();
  framesize -= 2*wordSize;
  if (framesize != 0) {
    __ add_slow(SP, SP, framesize);
  }
  __ raw_pop(FP, LR);

  // If this does safepoint polling, then do it here
  if (do_polling() && ra_->C->is_method_compilation()) {
    // mov_slow here is usually one or two instruction
    __ mov_address(Rtemp, (address)os::get_polling_page(), symbolic_Relocation::polling_page_reference);
    __ relocate(relocInfo::poll_return_type);
    __ ldr(Rtemp, Address(Rtemp));
  }
}
 360 
// Delegates to the generic MachNode size computation.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 364 
// Upper bound on the relocation entries the epilogue may need.
int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}
 368 
// Use the default pipeline class for scheduling purposes.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 372 
// Offset of the safepoint poll within the epilogue — not implemented on
// this platform (guarded by Unimplemented()).
int MachEpilogNode::safepoint_offset() const {
  assert( do_polling(), "no return for this epilog node");
  //  return MacroAssembler::size_of_sethi(os::get_polling_page());
  Unimplemented();
  return 0;
}
 379 
 380 //=============================================================================
 381 
 382 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
 383 enum RC { rc_bad, rc_int, rc_float, rc_stack };
 384 static enum RC rc_class( OptoReg::Name reg ) {
 385   if (!OptoReg::is_valid(reg)) return rc_bad;
 386   if (OptoReg::is_stack(reg)) return rc_stack;
 387   VMReg r = OptoReg::as_VMReg(reg);
 388   if (r->is_Register()) return rc_int;
 389   assert(r->is_FloatRegister(), "must be");
 390   return rc_float;
 391 }
 392 
// True when (src_first, src_second) encode an even/odd adjacent register
// pair (as required by LDRD/STRD) and offset fits the memoryHD addressing
// mode.
static inline bool is_iRegLd_memhd(OptoReg::Name src_first, OptoReg::Name src_second, int offset) {
  int rlo = Matcher::_regEncode[src_first];
  int rhi = Matcher::_regEncode[src_second];
  if (!((rlo&1)==0 && (rlo+1 == rhi))) {
    // NOTE(review): this diagnostic prints unconditionally (product builds
    // included) and the function then just returns false below — looks like
    // leftover debugging output; confirm before removing.
    tty->print_cr("CAUGHT BAD LDRD/STRD");
  }
  return (rlo&1)==0 && (rlo+1 == rhi) && is_memoryHD(offset);
}
 401 
// Workhorse for MachSpillCopyNode: depending on the arguments it either
// emits the spill copy (cbuf != NULL), prints its assembly listing
// (cbuf == NULL, !do_size), or just computes its size (do_size).
// Returns the size in bytes of the emitted code; returns 0 after recording
// a bailout when a stack offset does not fit the addressing mode.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
                                        PhaseRegAlloc *ra_,
                                        bool do_size,
                                        outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if (src_first == dst_first && src_second == dst_second)
    return size;            // Self copy, no move

#ifdef TODO
  if (bottom_type()->isa_vect() != NULL) {
  }
#endif

  // Shared code does not expect instruction set capability based bailouts here.
  // Handle offset unreachable bailout with minimal change in shared code.
  // Bailout only for real instruction emit.
  // This requires a single comment change in shared code. ( see output.cpp "Normal" instruction case )

  MacroAssembler _masm(cbuf);

  // --------------------------------------
  // Check for mem-mem move.  Load into unused float registers and fall into
  // the float-store case.
  // Note: this redirects src_* to the dedicated Rmemcopy float register(s),
  // so the float-store case below completes the stack-to-stack move.
  if (src_first_rc == rc_stack && dst_first_rc == rc_stack) {
    int offset = ra_->reg2offset(src_first);
    if (cbuf && !is_memoryfp(offset)) {
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
      return 0;
    } else {
      if (src_second_rc != rc_bad) {
        assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
        src_first     = OptoReg::Name(R_mem_copy_lo_num);
        src_second    = OptoReg::Name(R_mem_copy_hi_num);
        src_first_rc  = rc_float;
        src_second_rc = rc_float;
        if (cbuf) {
          __ ldr_double(Rmemcopy, Address(SP, offset));
        } else if (!do_size) {
          st->print(LDR_DOUBLE "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
        }
      } else {
        src_first     = OptoReg::Name(R_mem_copy_lo_num);
        src_first_rc  = rc_float;
        if (cbuf) {
          __ ldr_float(Rmemcopy, Address(SP, offset));
        } else if (!do_size) {
          st->print(LDR_FLOAT "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
        }
      }
      size += 4;
    }
  }

  if (src_second_rc == rc_stack && dst_second_rc == rc_stack) {
    Unimplemented();
  }

  // --------------------------------------
  // Check for integer reg-reg copy
  if (src_first_rc == rc_int && dst_first_rc == rc_int) {
    // Else normal reg-reg copy
    assert( src_second != dst_first, "smashed second before evacuating it" );
    if (cbuf) {
      __ mov(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
    } else if (!do_size) {
      st->print("MOV    R_%s, R_%s\t# spill",
                Matcher::regName[dst_first],
                Matcher::regName[src_first]);
#endif
    }
    size += 4;
  }

  // Check for integer store
  if (src_first_rc == rc_int && dst_first_rc == rc_stack) {
    int offset = ra_->reg2offset(dst_first);
    if (cbuf && !is_memoryI(offset)) {
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
      return 0;
    } else {
      // Use one 64-bit store when the pair is LDRD/STRD-eligible.
      if (src_second_rc != rc_bad && is_iRegLd_memhd(src_first, src_second, offset)) {
        assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
        if (cbuf) {
          __ str_64(reg_to_register_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else if (!do_size) {
          if (size != 0) st->print("\n\t");
          st->print(STR_64 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first), offset);
#endif
        }
        return size + 4;
      } else {
        if (cbuf) {
          __ str_32(reg_to_register_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else if (!do_size) {
          if (size != 0) st->print("\n\t");
          st->print(STR_32 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first), offset);
#endif
        }
      }
    }
    size += 4;
  }

  // Check for integer load
  if (dst_first_rc == rc_int && src_first_rc == rc_stack) {
    int offset = ra_->reg2offset(src_first);
    if (cbuf && !is_memoryI(offset)) {
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
      return 0;
    } else {
      // Use one 64-bit load when the destination pair is LDRD-eligible.
      if (src_second_rc != rc_bad && is_iRegLd_memhd(dst_first, dst_second, offset)) {
        // NOTE(review): this asserts alignment of the *source* stack pair
        // while is_iRegLd_memhd above checked the *destination* registers —
        // confirm whether the dst pair was intended here too.
        assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
        if (cbuf) {
          __ ldr_64(reg_to_register_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else if (!do_size) {
          if (size != 0) st->print("\n\t");
          st->print(LDR_64 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first), offset);
#endif
        }
        return size + 4;
      } else {
        if (cbuf) {
          __ ldr_32(reg_to_register_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else if (!do_size) {
          if (size != 0) st->print("\n\t");
          st->print(LDR_32 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first), offset);
#endif
        }
      }
    }
    size += 4;
  }

  // Check for float reg-reg copy
  if (src_first_rc == rc_float && dst_first_rc == rc_float) {
    if (src_second_rc != rc_bad) {
      assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
      if (cbuf) {
      __ mov_double(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
      } else if (!do_size) {
        st->print(MOV_DOUBLE "    R_%s, R_%s\t# spill",
                  Matcher::regName[dst_first],
                  Matcher::regName[src_first]);
#endif
      }
      return 4;
    }
    if (cbuf) {
      __ mov_float(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
    } else if (!do_size) {
      st->print(MOV_FLOAT "    R_%s, R_%s\t# spill",
                Matcher::regName[dst_first],
                Matcher::regName[src_first]);
#endif
    }
    size = 4;
  }

  // Check for float store
  if (src_first_rc == rc_float && dst_first_rc == rc_stack) {
    int offset = ra_->reg2offset(dst_first);
    if (cbuf && !is_memoryfp(offset)) {
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
      return 0;
    } else {
      // Further check for aligned-adjacent pair, so we can use a double store
      if (src_second_rc != rc_bad) {
        assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers and stack slots must be aligned/contiguous");
        if (cbuf) {
          __ str_double(reg_to_FloatRegister_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else if (!do_size) {
          if (size != 0) st->print("\n\t");
          st->print(STR_DOUBLE "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
#endif
        }
        return size + 4;
      } else {
        if (cbuf) {
          __ str_float(reg_to_FloatRegister_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else if (!do_size) {
          if (size != 0) st->print("\n\t");
          st->print(STR_FLOAT "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
#endif
        }
      }
    }
    size += 4;
  }

  // Check for float load
  if (dst_first_rc == rc_float && src_first_rc == rc_stack) {
    int offset = ra_->reg2offset(src_first);
    if (cbuf && !is_memoryfp(offset)) {
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
      return 0;
    } else {
      // Further check for aligned-adjacent pair, so we can use a double store
      if (src_second_rc != rc_bad) {
        assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers and stack slots must be aligned/contiguous");
        if (cbuf) {
          __ ldr_double(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else if (!do_size) {
          if (size != 0) st->print("\n\t");
          st->print(LDR_DOUBLE "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first),offset);
#endif
        }
        return size + 4;
      } else {
        if (cbuf) {
          __ ldr_float(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else if (!do_size) {
          if (size != 0) st->print("\n\t");
          st->print(LDR_FLOAT "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first),offset);
#endif
        }
      }
    }
    size += 4;
  }

  // check for int reg -> float reg move
  if (src_first_rc == rc_int && dst_first_rc == rc_float) {
    // Further check for aligned-adjacent pair, so we can use a single instruction
    if (src_second_rc != rc_bad) {
      assert((dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
      assert((src_first&1)==0 && src_first+1 == src_second, "pairs of registers must be aligned/contiguous");
      assert(src_second_rc == rc_int && dst_second_rc == rc_float, "unsupported");
      if (cbuf) {
        __ fmdrr(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]), reg_to_register_object(Matcher::_regEncode[src_second]));
#ifndef PRODUCT
      } else if (!do_size) {
        if (size != 0) st->print("\n\t");
        st->print("FMDRR   R_%s, R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first), OptoReg::regname(src_second));
#endif
      }
      return size + 4;
    } else {
      if (cbuf) {
        __ fmsr(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
      } else if (!do_size) {
        if (size != 0) st->print("\n\t");
        st->print(FMSR "   R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first));
#endif
      }
      size += 4;
    }
  }

  // check for float reg -> int reg move
  if (src_first_rc == rc_float && dst_first_rc == rc_int) {
    // Further check for aligned-adjacent pair, so we can use a single instruction
    if (src_second_rc != rc_bad) {
      assert((src_first&1)==0 && src_first+1 == src_second, "pairs of registers must be aligned/contiguous");
      assert((dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
      assert(src_second_rc == rc_float && dst_second_rc == rc_int, "unsupported");
      if (cbuf) {
        __ fmrrd(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[dst_second]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
      } else if (!do_size) {
        if (size != 0) st->print("\n\t");
        st->print("FMRRD   R_%s, R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(dst_second), OptoReg::regname(src_first));
#endif
      }
      return size + 4;
    } else {
      if (cbuf) {
        __ fmrs(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
      } else if (!do_size) {
        if (size != 0) st->print("\n\t");
        st->print(FMRS "   R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first));
#endif
      }
      size += 4;
    }
  }

  // --------------------------------------------------------------------
  // Check for hi bits still needing moving.  Only happens for misaligned
  // arguments to native calls.
  if (src_second == dst_second)
    return size;               // Self copy; no move
  assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );

  // Check for integer reg-reg copy.  Hi bits are stuck up in the top
  // 32-bits of a 64-bit register, but are needed in low bits of another
  // register (else it's a hi-bits-to-hi-bits copy which should have
  // happened already as part of a 64-bit move)
  if (src_second_rc == rc_int && dst_second_rc == rc_int) {
    if (cbuf) {
      __ mov(reg_to_register_object(Matcher::_regEncode[dst_second]), reg_to_register_object(Matcher::_regEncode[src_second]));
#ifndef PRODUCT
    } else if (!do_size) {
      if (size != 0) st->print("\n\t");
      st->print("MOV    R_%s, R_%s\t# spill high",
                Matcher::regName[dst_second],
                Matcher::regName[src_second]);
#endif
    }
    return size+4;
  }

  // Check for high word integer store
  if (src_second_rc == rc_int && dst_second_rc == rc_stack) {
    int offset = ra_->reg2offset(dst_second);

    if (cbuf && !is_memoryP(offset)) {
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
      return 0;
    } else {
      if (cbuf) {
        __ str(reg_to_register_object(Matcher::_regEncode[src_second]), Address(SP, offset));
#ifndef PRODUCT
      } else if (!do_size) {
        if (size != 0) st->print("\n\t");
        st->print("STR   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_second), offset);
#endif
      }
    }
    return size + 4;
  }

  // Check for high word integer load
  if (dst_second_rc == rc_int && src_second_rc == rc_stack) {
    int offset = ra_->reg2offset(src_second);
    if (cbuf && !is_memoryP(offset)) {
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
      return 0;
    } else {
      if (cbuf) {
        __ ldr(reg_to_register_object(Matcher::_regEncode[dst_second]), Address(SP, offset));
#ifndef PRODUCT
      } else if (!do_size) {
        if (size != 0) st->print("\n\t");
        st->print("LDR   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_second), offset);
#endif
      }
    }
    return size + 4;
  }

  Unimplemented();
  return 0; // Mute compiler
}
 773 
#ifndef PRODUCT
// Debug-only listing: run implementation() in print-only mode.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
 779 
// Emit the spill copy into cbuf (see implementation() above).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
 783 
// Compute the spill copy's size without emitting anything.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
 787 
 788 //=============================================================================
#ifndef PRODUCT
// Debug printout; each NOP is one 4-byte instruction, so the pad is
// 4 * _count bytes in total.
void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
 794 
 795 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
 796   MacroAssembler _masm(&cbuf);
 797   for(int i = 0; i < _count; i += 1) {
 798     __ nop();
 799   }
 800 }
 801 
// Size of the pad: each ARM instruction is 4 bytes, times _count NOPs.
uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  return 4 * _count;
}
 805 
 806 
 807 //=============================================================================
#ifndef PRODUCT
// Debug printout of the lock-box address computation: reg = SP + offset.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADD    %s,R_SP+#%d",Matcher::regName[reg], offset);
}
#endif
 815 
 816 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 817   MacroAssembler _masm(&cbuf);
 818   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 819   int reg = ra_->get_encode(this);
 820   Register dst = reg_to_register_object(reg);
 821 
 822   if (is_aimm(offset)) {
 823     __ add(dst, SP, offset);
 824   } else {
 825     __ mov_slow(dst, offset);
 826     __ add(dst, SP, dst);
 827   }
 828 }
 829 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
  // Instead, emit into a scratch buffer and measure the result.
  assert(ra_ == ra_->C->regalloc(), "sanity");
  return ra_->C->scratch_emit_size(this);
}
 835 
 836 //=============================================================================
 837 #ifndef PRODUCT
 838 #define R_RTEMP "R_R12"
 839 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
 840   st->print_cr("\nUEP:");
 841   if (UseCompressedClassPointers) {
 842     st->print_cr("\tLDR_w " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
 843     st->print_cr("\tdecode_klass " R_RTEMP);
 844   } else {
 845     st->print_cr("\tLDR   " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
 846   }
 847   st->print_cr("\tCMP   " R_RTEMP ",R_R8" );
 848   st->print   ("\tB.NE  SharedRuntime::handle_ic_miss_stub");
 849 }
 850 #endif
 851 
 852 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 853   MacroAssembler _masm(&cbuf);
 854   Register iCache  = reg_to_register_object(Matcher::inline_cache_reg_encode());
 855   assert(iCache == Ricklass, "should be");
 856   Register receiver = R0;
 857 
 858   __ load_klass(Rtemp, receiver);
 859   __ cmp(Rtemp, iCache);
 860   __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, noreg, ne);
 861 }
 862 
// Size of the UEP code, measured via the generic MachNode machinery.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 866 
 867 
 868 //=============================================================================
 869 
 870 // Emit exception handler code.
 871 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
 872   MacroAssembler _masm(&cbuf);
 873 
 874   address base = __ start_a_stub(size_exception_handler());
 875   if (base == NULL) {
 876     ciEnv::current()->record_failure("CodeCache is full");
 877     return 0;  // CodeBuffer::expand failed
 878   }
 879 
 880   int offset = __ offset();
 881 
 882   // OK to trash LR, because exception blob will kill it
 883   __ jump(OptoRuntime::exception_blob()->entry_point(), relocInfo::runtime_call_type, LR_tmp);
 884 
 885   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
 886 
 887   __ end_a_stub();
 888 
 889   return offset;
 890 }
 891 
// Emit the deoptimization handler stub.  Returns the offset of the handler
// in the code buffer, or 0 when the stub could not be allocated.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Can't use any of the current frame's registers as we may have deopted
  // at a poll and everything can be live.
  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }

  int offset = __ offset();
  address deopt_pc = __ pc();

  // Store the address of this handler (deopt_pc) into a newly created
  // stack slot, while keeping the live value of LR intact.
  __ sub(SP, SP, wordSize); // make room for saved PC
  __ push(LR); // save LR that may be live when we get here
  __ mov_relative_address(LR, deopt_pc);
  __ str(LR, Address(SP, wordSize)); // save deopt PC
  __ pop(LR); // restore LR
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");

  __ end_a_stub();
  return offset;
}
 918 
// Platform capability filter: returns whether this opcode has a usable
// match rule on this CPU, taking runtime feature flags into account.
const bool Matcher::match_rule_supported(int opcode) {
  if (!has_match_rule(opcode))
    return false;

  switch (opcode) {
  // Population count is only matched when the flag enables it.
  case Op_PopCountI:
  case Op_PopCountL:
    if (!UsePopCountInstruction)
      return false;
    break;
  // Integer vector arithmetic, shifts, and bitwise logic require SIMD.
  case Op_LShiftCntV:
  case Op_RShiftCntV:
  case Op_AddVB:
  case Op_AddVS:
  case Op_AddVI:
  case Op_AddVL:
  case Op_SubVB:
  case Op_SubVS:
  case Op_SubVI:
  case Op_SubVL:
  case Op_MulVS:
  case Op_MulVI:
  case Op_LShiftVB:
  case Op_LShiftVS:
  case Op_LShiftVI:
  case Op_LShiftVL:
  case Op_RShiftVB:
  case Op_RShiftVS:
  case Op_RShiftVI:
  case Op_RShiftVL:
  case Op_URShiftVB:
  case Op_URShiftVS:
  case Op_URShiftVI:
  case Op_URShiftVL:
  case Op_AndV:
  case Op_OrV:
  case Op_XorV:
    return VM_Version::has_simd();
  // Vector load/store and single-float vector math work with VFP or SIMD.
  case Op_LoadVector:
  case Op_StoreVector:
  case Op_AddVF:
  case Op_SubVF:
  case Op_MulVF:
    return VM_Version::has_vfp() || VM_Version::has_simd();
  // Double vector math and vector division require VFP.
  case Op_AddVD:
  case Op_SubVD:
  case Op_MulVD:
  case Op_DivVF:
  case Op_DivVD:
    return VM_Version::has_vfp();
  }

  return true;  // Per default match rules are supported.
}
 973 
 974 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
 975 
 976   // TODO
 977   // identify extra cases that we might want to provide match rules for
 978   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
 979   bool ret_value = match_rule_supported(opcode);
 980   // Add rules here.
 981 
 982   return ret_value;  // Per default match rules are supported.
 983 }
 984 
// Reports no predicated (masked) vector support on this platform.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// Use the default register-pressure threshold for float registers.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

// Vector width in bytes
const int Matcher::vector_width_in_bytes(BasicType bt) {
  return MaxVectorSize;
}
1001 
1002 // Vector ideal reg corresponding to specified size in bytes
1003 const uint Matcher::vector_ideal_reg(int size) {
1004   assert(MaxVectorSize >= size, "");
1005   switch(size) {
1006     case  8: return Op_VecD;
1007     case 16: return Op_VecX;
1008   }
1009   ShouldNotReachHere();
1010   return 0;
1011 }
1012 
// Shift counts live in the same kind of vector register as the data.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return vector_ideal_reg(size);
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Minimum vector size: the number of elements that fit in 8 bytes.
const int Matcher::min_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return 8/type2aelembytes(bt);
}

// ARM doesn't support misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return false;
}

// ARM doesn't support AES intrinsics
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// Reports that ConvL2F is not supported directly on this platform.
const bool Matcher::convL2FSupported(void) {
  return false;
}
1041 
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
//       this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // On ARM a branch displacement is calculated relative to address
  // of the branch + 8.
  //
  // offset -= 8;
  // return (Assembler::is_simm24(offset));
  // Short branches are currently disabled entirely (returns false for
  // every offset, including 0 — see NOTE above).
  return false;
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  return false;
}
1060 
// Platform tuning knobs consulted by the matcher / register allocator.

// No scaling for the parameter the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true;

// Needs 2 CMOV's for longs.
const int Matcher::long_cmove_cost() { return 2; }

// CMOVF/CMOVD are expensive on ARM.
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// FIXME: does this handle vector shifts as well?
const bool Matcher::need_masked_shift_count = true;

const bool Matcher::convi2l_type_required = true;
1079 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  return clone_base_plus_offset_address(m, mstack, address_visited);
}

// No platform-specific address reshaping performed on ARM.
void Compile::reshape_address(AddPNode* addp) {
}

// The narrow-oop/narrow-klass queries below are guarded with
// NOT_LP64(ShouldNotCallThis()): they must only be reached on LP64.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  NOT_LP64(ShouldNotCallThis());
  return true;
}

bool Matcher::const_klass_prefer_decode() {
  NOT_LP64(ShouldNotCallThis());
  return true;
}
1111 
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;

// No-op on ARM.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// ARM does not handle callee-save floats.
bool Matcher::float_in_double() {
  return false;
}

// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved.  In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
#ifdef _LP64
const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif
1148 
1149 // Return whether or not this register is ever used as an argument.  This
1150 // function is used on startup to build the trampoline stubs in generateOptoStub.
1151 // Registers not mentioned will be killed by the VM call in the trampoline, and
1152 // arguments in those registers not be available to the callee.
1153 bool Matcher::can_be_java_arg( int reg ) {
1154   if (reg == R_R0_num ||
1155       reg == R_R1_num ||
1156       reg == R_R2_num ||
1157       reg == R_R3_num) return true;
1158 
1159   if (reg >= R_S0_num &&
1160       reg <= R_S13_num) return true;
1161   return false;
1162 }
1163 
// Any register that can carry a Java argument is also spillable.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// Never use inline assembly for long division by a constant on ARM.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}
1171 
// The div/mod projection masks below are not used on this platform:
// each is unreachable (ShouldNotReachHere) and the trailing return only
// silences the compiler.

// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is preserved in FP (Rmh_SP_save) across method-handle invokes,
// so the save mask is the FP register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REGP_mask();
}
1199 
// Returns true when the call's entry point is not reachable from the
// code cache (per MacroAssembler::_reachable_from_cache), i.e. a far
// call sequence may be required.
bool maybe_far_call(const CallNode *n) {
  return !MacroAssembler::_reachable_from_cache(n->as_Call()->entry_point());
}

// Same check for mach-level call nodes.
bool maybe_far_call(const MachCallNode *n) {
  return !MacroAssembler::_reachable_from_cache(n->as_MachCall()->entry_point());
}
1207 
1208 %}
1209 
1210 //----------ENCODING BLOCK-----------------------------------------------------
1211 // This block specifies the encoding classes used by the compiler to output
1212 // byte streams.  Encoding classes are parameterized macros used by
1213 // Machine Instruction Nodes in order to generate the bit encoding of the
1214 // instruction.  Operands specify their base encoding interface with the
// interface keyword.  Four interfaces are currently supported:
// REG_INTER, CONST_INTER, MEMORY_INTER, and COND_INTER.  REG_INTER causes an
1217 // operand to generate a function which returns its register number when
1218 // queried.   CONST_INTER causes an operand to generate a function which
1219 // returns the value of the constant when queried.  MEMORY_INTER causes an
1220 // operand to generate four functions which return the Base Register, the
1221 // Index Register, the Scale Value, and the Offset Value of the operand when
1222 // queried.  COND_INTER causes an operand to generate six functions which
1223 // return the encoding code (ie - encoding bits for the instruction)
1224 // associated with each basic boolean condition for a conditional instruction.
1225 //
1226 // Instructions specify two basic values for encoding.  Again, a function
1227 // is available to check if the constant displacement is an oop. They use the
1228 // ins_encode keyword to specify their encoding classes (which must be
1229 // a sequence of enc_class names, and their parameters, specified in
1230 // the encoding block), and they use the
1231 // opcode keyword to specify, in order, their primary, secondary, and
1232 // tertiary opcode.  Only the opcode sections which a particular instruction
1233 // needs for encoding need to be specified.
1234 encode %{
  enc_class call_epilog %{
    // No call epilog code is needed on ARM.
    // nothing
  %}

  enc_class Java_To_Runtime (method meth) %{
    // CALL directly to the runtime
    emit_call_reloc(cbuf, as_MachCall(), $meth, runtime_call_Relocation::spec());
  %}
1243 
  enc_class Java_Static_Call (method meth) %{
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.

    // _method == NULL: emit as a plain runtime call.
    if ( !_method) {
      emit_call_reloc(cbuf, as_MachCall(), $meth, runtime_call_Relocation::spec());
    } else {
      // Pick the relocation matching the call kind (optimized virtual
      // vs. ordinary static), carrying the resolved method index.
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      emit_call_reloc(cbuf, as_MachCall(), $meth, rspec);

      // Emit stubs for static call.
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
  %}
1264 
  // Store the return address of the enclosing call into the thread's
  // last_Java_pc slot.  Emits exactly two instructions (ADR + STR).
  enc_class save_last_PC %{
    // preserve mark
    address mark = cbuf.insts()->mark();
    debug_only(int off0 = cbuf.insts_size());
    MacroAssembler _masm(&cbuf);
    int ret_addr_offset = as_MachCall()->ret_addr_offset();
    __ adr(LR, mark + ret_addr_offset);
    __ str(LR, Address(Rthread, JavaThread::last_Java_pc_offset()));
    debug_only(int off1 = cbuf.insts_size());
    assert(off1 - off0 == 2 * Assembler::InstructionSize, "correct size prediction");
    // restore mark
    cbuf.insts()->set_mark(mark);
  %}

  // Save SP into Rmh_SP_save (FP) before a call that may modify SP.
  // Emits exactly one instruction (MOV).
  enc_class preserve_SP %{
    // preserve mark
    address mark = cbuf.insts()->mark();
    debug_only(int off0 = cbuf.insts_size());
    MacroAssembler _masm(&cbuf);
    // FP is preserved across all calls, even compiled calls.
    // Use it to preserve SP in places where the callee might change the SP.
    __ mov(Rmh_SP_save, SP);
    debug_only(int off1 = cbuf.insts_size());
    assert(off1 - off0 == 4, "correct size prediction");
    // restore mark
    cbuf.insts()->set_mark(mark);
  %}

  // Restore SP from Rmh_SP_save after a call guarded by preserve_SP.
  enc_class restore_SP %{
    MacroAssembler _masm(&cbuf);
    __ mov(SP, Rmh_SP_save);
  %}
1297 
  enc_class Java_Dynamic_Call (method meth) %{
    MacroAssembler _masm(&cbuf);
    Register R8_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
    assert(R8_ic_reg == Ricklass, "should be");
    __ set_inst_mark();
    // Preload the IC register with the non-oop sentinel via MOVW/MOVT;
    // the IC machinery later patches this pair with the real cached klass.
    __ movw(R8_ic_reg, ((unsigned int)Universe::non_oop_word()) & 0xffff);
    __ movt(R8_ic_reg, ((unsigned int)Universe::non_oop_word()) >> 16);
    address  virtual_call_oop_addr = __ inst_mark();
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    int method_index = resolved_method_index(cbuf);
    __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
    emit_call_reloc(cbuf, as_MachCall(), $meth, RelocationHolder::none);
  %}
1312 
  enc_class LdReplImmI(immI src, regD dst, iRegI tmp, int cnt, int wth) %{
    // FIXME: load from constant table?
    // Load a constant replicated "count" times with width "width"
    int count = $cnt$$constant;
    int width = $wth$$constant;
    assert(count*width == 4, "sanity");
    int val = $src$$constant;
    if (width < 4) {
      // Narrow element: mask to the element width, then tile the value
      // across the 32-bit word.
      int bit_width = width * 8;
      val &= (((int)1) << bit_width) - 1; // mask off sign bits
      for (int i = 0; i < count - 1; i++) {
        val |= (val << bit_width);
      }
    }
    MacroAssembler _masm(&cbuf);

    // Materialize the 32-bit pattern in tmp with the cheapest sequence,
    // then copy it into both halves of the destination D register.
    if (val == -1) {
      __ mvn($tmp$$Register, 0);
    } else if (val == 0) {
      __ mov($tmp$$Register, 0);
    } else {
      __ movw($tmp$$Register, val & 0xffff);
      __ movt($tmp$$Register, (unsigned int)val >> 16);
    }
    __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
  %}
1339 
1340   enc_class LdReplImmF(immF src, regD dst, iRegI tmp) %{
1341     // Replicate float con 2 times and pack into vector (8 bytes) in regD.
1342     float fval = $src$$constant;
1343     int val = *((int*)&fval);
1344     MacroAssembler _masm(&cbuf);
1345 
1346     if (val == -1) {
1347       __ mvn($tmp$$Register, 0);
1348     } else if (val == 0) {
1349       __ mov($tmp$$Register, 0);
1350     } else {
1351       __ movw($tmp$$Register, val & 0xffff);
1352       __ movt($tmp$$Register, (unsigned int)val >> 16);
1353     }
1354     __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
1355   %}
1356 
  // Lexicographic comparison of two UTF-16 char sequences; result is the
  // difference of the first mismatched chars, or the length difference
  // when one string is a prefix of the other.
  enc_class enc_String_Compare(R0RegP str1, R1RegP str2, R2RegI cnt1, R3RegI cnt2, iRegI result, iRegI tmp1, iRegI tmp2) %{
    Label Ldone, Lloop;
    MacroAssembler _masm(&cbuf);

    Register   str1_reg = $str1$$Register;
    Register   str2_reg = $str2$$Register;
    Register   cnt1_reg = $cnt1$$Register; // int
    Register   cnt2_reg = $cnt2$$Register; // int
    Register   tmp1_reg = $tmp1$$Register;
    Register   tmp2_reg = $tmp2$$Register;
    Register result_reg = $result$$Register;

    assert_different_registers(str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp1_reg, tmp2_reg);

    // Compute the minimum of the string lengths(str1_reg) and the
    // difference of the string lengths (stack)

    // See if the lengths are different, and calculate min in str1_reg.
    // Stash diff in tmp2 in case we need it for a tie-breaker.
    __ subs_32(tmp2_reg, cnt1_reg, cnt2_reg);
    __ mov(cnt1_reg, AsmOperand(cnt1_reg, lsl, exact_log2(sizeof(jchar)))); // scale the limit
    __ mov(cnt1_reg, AsmOperand(cnt2_reg, lsl, exact_log2(sizeof(jchar))), pl); // scale the limit

    // reallocate cnt1_reg, cnt2_reg, result_reg
    // Note:  limit_reg holds the string length pre-scaled by 2
    Register limit_reg = cnt1_reg;
    Register  chr2_reg = cnt2_reg;
    Register  chr1_reg = tmp1_reg;
    // str{12} are the base pointers

    // Is the minimum length zero?
    __ cmp_32(limit_reg, 0);
    if (result_reg != tmp2_reg) {
      __ mov(result_reg, tmp2_reg, eq);
    }
    __ b(Ldone, eq);

    // Load first characters
    __ ldrh(chr1_reg, Address(str1_reg, 0));
    __ ldrh(chr2_reg, Address(str2_reg, 0));

    // Compare first characters
    __ subs(chr1_reg, chr1_reg, chr2_reg);
    if (result_reg != chr1_reg) {
      __ mov(result_reg, chr1_reg, ne);
    }
    __ b(Ldone, ne);

    {
      // Check after comparing first character to see if strings are equivalent
      // Check if the strings start at same location
      __ cmp(str1_reg, str2_reg);
      // Check if the length difference is zero
      __ cond_cmp(tmp2_reg, 0, eq);
      __ mov(result_reg, 0, eq); // result is zero
      __ b(Ldone, eq);
      // Strings might not be equal
    }

    // Only one character left to compare? Then the length difference decides.
    __ subs(chr1_reg, limit_reg, 1 * sizeof(jchar));
    if (result_reg != tmp2_reg) {
      __ mov(result_reg, tmp2_reg, eq);
    }
    __ b(Ldone, eq);

    // Shift str1_reg and str2_reg to the end of the arrays, negate limit
    __ add(str1_reg, str1_reg, limit_reg);
    __ add(str2_reg, str2_reg, limit_reg);
    __ neg(limit_reg, chr1_reg);  // limit = -(limit-2)

    // Compare the rest of the characters
    __ bind(Lloop);
    __ ldrh(chr1_reg, Address(str1_reg, limit_reg));
    __ ldrh(chr2_reg, Address(str2_reg, limit_reg));
    __ subs(chr1_reg, chr1_reg, chr2_reg);
    if (result_reg != chr1_reg) {
      __ mov(result_reg, chr1_reg, ne);
    }
    __ b(Ldone, ne);

    // Advance the (negative) index toward zero; loop while non-zero.
    __ adds(limit_reg, limit_reg, sizeof(jchar));
    __ b(Lloop, ne);

    // If strings are equal up to min length, return the length difference.
    if (result_reg != tmp2_reg) {
      __ mov(result_reg, tmp2_reg);
    }

    // Otherwise, return the difference between the first mismatched chars.
    __ bind(Ldone);
  %}
1448 
  // Equality check of two UTF-16 char sequences of the same length;
  // result_reg is set to 1 when equal, 0 otherwise.
  enc_class enc_String_Equals(R0RegP str1, R1RegP str2, R2RegI cnt, iRegI result, iRegI tmp1, iRegI tmp2) %{
    Label Lchar, Lchar_loop, Ldone, Lequal;
    MacroAssembler _masm(&cbuf);

    Register   str1_reg = $str1$$Register;
    Register   str2_reg = $str2$$Register;
    Register    cnt_reg = $cnt$$Register; // int
    Register   tmp1_reg = $tmp1$$Register;
    Register   tmp2_reg = $tmp2$$Register;
    Register result_reg = $result$$Register;

    assert_different_registers(str1_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, result_reg);

    __ cmp(str1_reg, str2_reg); //same char[] ?
    __ b(Lequal, eq);

    __ cbz_32(cnt_reg, Lequal); // count == 0

    //rename registers
    Register limit_reg = cnt_reg;
    Register  chr1_reg = tmp1_reg;
    Register  chr2_reg = tmp2_reg;

    // Convert the element count into a byte count.
    __ logical_shift_left(limit_reg, limit_reg, exact_log2(sizeof(jchar)));

    //check for alignment and position the pointers to the ends
    __ orr(chr1_reg, str1_reg, str2_reg);
    __ tst(chr1_reg, 0x3);

    // notZero means at least one not 4-byte aligned.
    // We could optimize the case when both arrays are not aligned
    // but it is not frequent case and it requires additional checks.
    __ b(Lchar, ne);

    // Compare char[] arrays aligned to 4 bytes.
    __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg,
                          chr1_reg, chr2_reg, Ldone);

    __ b(Lequal); // equal

    // char by char compare
    __ bind(Lchar);
    __ mov(result_reg, 0);
    __ add(str1_reg, limit_reg, str1_reg);
    __ add(str2_reg, limit_reg, str2_reg);
    __ neg(limit_reg, limit_reg); //negate count

    // Lchar_loop
    // Walk both arrays with a negative index counting up to zero.
    __ bind(Lchar_loop);
    __ ldrh(chr1_reg, Address(str1_reg, limit_reg));
    __ ldrh(chr2_reg, Address(str2_reg, limit_reg));
    __ cmp(chr1_reg, chr2_reg);
    __ b(Ldone, ne);
    __ adds(limit_reg, limit_reg, sizeof(jchar));
    __ b(Lchar_loop, ne);

    __ bind(Lequal);
    __ mov(result_reg, 1);  //equal

    __ bind(Ldone);
  %}
1510 
  // Equality check of two char arrays (including length comparison);
  // result_reg is set to 1 when equal, 0 otherwise.
  enc_class enc_Array_Equals(R0RegP ary1, R1RegP ary2, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI result) %{
    // NOTE(review): Lloop appears unused in this encoding — candidate
    // for removal.
    Label Ldone, Lloop, Lequal;
    MacroAssembler _masm(&cbuf);

    Register   ary1_reg = $ary1$$Register;
    Register   ary2_reg = $ary2$$Register;
    Register   tmp1_reg = $tmp1$$Register;
    Register   tmp2_reg = $tmp2$$Register;
    Register   tmp3_reg = $tmp3$$Register;
    Register result_reg = $result$$Register;

    assert_different_registers(ary1_reg, ary2_reg, tmp1_reg, tmp2_reg, tmp3_reg, result_reg);

    int length_offset  = arrayOopDesc::length_offset_in_bytes();
    int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);

    // return true if the same array
    __ teq(ary1_reg, ary2_reg);
    __ mov(result_reg, 1, eq);
    __ b(Ldone, eq); // equal

    // A NULL array is never equal to a non-NULL one (the identical-NULL
    // case was handled by the identity check above).
    __ tst(ary1_reg, ary1_reg);
    __ mov(result_reg, 0, eq);
    __ b(Ldone, eq);    // not equal

    __ tst(ary2_reg, ary2_reg);
    __ mov(result_reg, 0, eq);
    __ b(Ldone, eq);    // not equal

    //load the lengths of arrays
    __ ldr_s32(tmp1_reg, Address(ary1_reg, length_offset)); // int
    __ ldr_s32(tmp2_reg, Address(ary2_reg, length_offset)); // int

    // return false if the two arrays are not equal length
    __ teq_32(tmp1_reg, tmp2_reg);
    __ mov(result_reg, 0, ne);
    __ b(Ldone, ne);    // not equal

    __ tst(tmp1_reg, tmp1_reg);
    __ mov(result_reg, 1, eq);
    __ b(Ldone, eq);    // zero-length arrays are equal

    // load array addresses
    __ add(ary1_reg, ary1_reg, base_offset);
    __ add(ary2_reg, ary2_reg, base_offset);

    // renaming registers
    Register chr1_reg  =  tmp3_reg;   // for characters in ary1
    Register chr2_reg  =  tmp2_reg;   // for characters in ary2
    Register limit_reg =  tmp1_reg;   // length

    // set byte count
    __ logical_shift_left_32(limit_reg, limit_reg, exact_log2(sizeof(jchar)));

    // Compare char[] arrays aligned to 4 bytes.
    __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg,
                          chr1_reg, chr2_reg, Ldone);
    __ bind(Lequal);
    __ mov(result_reg, 1);  //equal

    __ bind(Ldone);
    %}
1573 %}
1574 
1575 //----------FRAME--------------------------------------------------------------
1576 // Definition of frame structure and management information.
1577 //
1578 //  S T A C K   L A Y O U T    Allocators stack-slot number
1579 //                             |   (to get allocators register number
1580 //  G  Owned by    |        |  v    add VMRegImpl::stack0)
1581 //  r   CALLER     |        |
1582 //  o     |        +--------+      pad to even-align allocators stack-slot
1583 //  w     V        |  pad0  |        numbers; owned by CALLER
1584 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
1585 //  h     ^        |   in   |  5
1586 //        |        |  args  |  4   Holes in incoming args owned by SELF
1587 //  |     |        |        |  3
1588 //  |     |        +--------+
1589 //  V     |        | old out|      Empty on Intel, window on Sparc
1590 //        |    old |preserve|      Must be even aligned.
1591 //        |     SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned
1592 //        |        |   in   |  3   area for Intel ret address
1593 //     Owned by    |preserve|      Empty on Sparc.
1594 //       SELF      +--------+
1595 //        |        |  pad2  |  2   pad to align old SP
1596 //        |        +--------+  1
1597 //        |        | locks  |  0
1598 //        |        +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
1599 //        |        |  pad1  | 11   pad to align new SP
1600 //        |        +--------+
1601 //        |        |        | 10
1602 //        |        | spills |  9   spills
1603 //        V        |        |  8   (pad0 slot for callee)
1604 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
1605 //        ^        |  out   |  7
1606 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
1607 //     Owned by    +--------+
1608 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
1609 //        |    new |preserve|      Must be even-aligned.
1610 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
1611 //        |        |        |
1612 //
1613 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
1614 //         known from SELF's arguments and the Java calling convention.
1615 //         Region 6-7 is determined per call site.
1616 // Note 2: If the calling convention leaves holes in the incoming argument
1617 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
1619 //         incoming area, as the Java calling convention is completely under
1620 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
1622 //         varargs C calling conventions.
1623 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
1624 //         even aligned with pad0 as needed.
1625 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
1626 //         region 6-11 is even aligned; it may be padded out more so that
1627 //         the region from SP to FP meets the minimum stack alignment.
1628 
frame %{
  // What direction does stack grow in (assumed to be same for native & Java)
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  inline_cache_reg(R_Ricklass);          // Inline Cache Register or Method* for I2C
  interpreter_method_oop_reg(R_Rmethod); // Method Oop Register when calling interpreter

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by a Monitor enter
  sync_stack_slots(1 * VMRegImpl::slots_per_word);

  // Compiled code's Frame Pointer
  frame_pointer(R_R13);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes);
  //  LP64: Alignment size in bytes (128-bit -> 16 bytes)
  // !LP64: Alignment size in bytes (64-bit  ->  8 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  // FP + LR
  in_preserve_stack_slots(2 * VMRegImpl::slots_per_word);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  // ADLC doesn't support parsing expressions, so I folded the math by hand.
  varargs_C_out_slots_killed( 0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  return_addr(STACK - 1*VMRegImpl::slots_per_word +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // java
  calling_convention %{
    (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);

  %}

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // C.
  c_calling_convention %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of compiled Java return values.  Same as C
  return_value %{
    return c2::return_value(ideal_reg);
  %}

%}
1696 
1697 //----------ATTRIBUTES---------------------------------------------------------
1698 //----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
ins_attrib ins_size(32);           // Required size attribute (in bits)
ins_attrib ins_short_branch(0);    // Required flag: is this instruction a
                                   // non-matching short branch variant of some
                                   // long branch?
1704 
1705 //----------OPERANDS-----------------------------------------------------------
1706 // Operand definitions must precede instruction definitions for correct parsing
1707 // in the ADLC because operands constitute user defined types which are used in
1708 // instruction definitions.
1709 
1710 //----------Simple Operands----------------------------------------------------
1711 // Immediate Operands
// Integer Immediate: 32-bit (any value)
operand immI() %{
  match(ConI);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 8-bit unsigned - for VMOV
operand immU8() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 255));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 16-bit unsigned (high 16 bits zero), requires MOVW support
operand immI16() %{
  predicate((n->get_int() >> 16) == 0 && VM_Version::supports_movw());
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: offset for half and double word loads and stores
operand immIHD() %{
  predicate(is_memoryHD(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: offset for fp loads and stores
// (must also be 4-byte aligned)
operand immIFP() %{
  predicate(is_memoryfp(n->get_int()) && ((n->get_int() & 3) == 0));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes and shifts: [0, 31]
operand immU5() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 31));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 6-bit values in the range [32, 63]
operand immU6Big() %{
  predicate(n->get_int() >= 32 && n->get_int() <= 63);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 0
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 1
operand immI_1() %{
  predicate(n->get_int() == 1);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 2
operand immI_2() %{
  predicate(n->get_int() == 2);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 3
operand immI_3() %{
  predicate(n->get_int() == 3);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 4
operand immI_4() %{
  predicate(n->get_int() == 4);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 8
operand immI_8() %{
  predicate(n->get_int() == 8);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
1839 
// Int Immediate: non-negative (fits in 31 bits)
operand immU31()
%{
  predicate(n->get_int() >= 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 32-63
// NOTE(review): predicate is identical to immU6Big above — presumably kept
// as a distinct operand name for separate instruction matches; confirm.
operand immI_32_63() %{
  predicate(n->get_int() >= 32 && n->get_int() <= 63);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Integer Immediate: the value 16
operand immI_16() %{
  predicate(n->get_int() == 16);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 24
operand immI_24() %{
  predicate(n->get_int() == 24);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 255
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 65535
operand immI_65535() %{
  predicate(n->get_int() == 65535);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
1902 
// Integer Immediates for arithmetic instructions

// Immediate accepted by is_aimm() — i.e. encodable in the immediate field
// of an ARM arithmetic (data-processing) instruction.
operand aimmI() %{
  predicate(is_aimm(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediate whose negation is encodable (lets the matcher flip add<->sub).
operand aimmIneg() %{
  predicate(is_aimm(-n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative encodable arithmetic immediate.
operand aimmU31() %{
  predicate((0 <= n->get_int()) && is_aimm(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediates for logical instructions

// Immediate accepted by is_limmI() — encodable in a logical instruction.
operand limmI() %{
  predicate(is_limmI(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Logical immediate restricted to the low 8 bits.
operand limmIlow8() %{
  predicate(is_limmI_low(n->get_int(), 8));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative encodable logical immediate.
operand limmU31() %{
  predicate(0 <= n->get_int() && is_limmI(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediate whose bitwise complement is encodable (e.g. for BIC/MVN forms).
operand limmIn() %{
  predicate(is_limmI(~n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
1969 
1970 
// Long Immediate: the value FF
operand immL_FF() %{
  predicate( n->get_long() == 0xFFL );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: the value FFFF
operand immL_FFFF() %{
  predicate( n->get_long() == 0xFFFFL );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 32 or 64-bit
operand immP() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate(n->get_ptr() == 0);
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: the polling page address (non-null)
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);

  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Pointer Immediate (compressed oop constant)
operand immN()
%{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Immediate (compressed class pointer constant)
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Narrow Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: any 64-bit constant
operand immL() %{
  match(ConL);
  op_cost(40);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: the value 0
operand immL0() %{
  predicate(n->get_long() == 0L);
  match(ConL);
  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 16-bit unsigned, requires MOVW support
operand immL16() %{
  predicate(n->get_long() >= 0 && n->get_long() < (1<<16)  && VM_Version::supports_movw());
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
2085 
// Double Immediate
operand immD() %{
  match(ConD);

  op_cost(40);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d (tested via bit pattern, so -0.0d is excluded)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);

  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as an 8-bit FP immediate (VMOV)
operand imm8D() %{
  predicate(Assembler::double_num(n->getd()).can_be_imm8());
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f (tested via bit pattern, so -0.0f is excluded)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encoded as 8 bits (VMOV)
operand imm8F() %{
  predicate(Assembler::float_num(n->getf()).can_be_imm8());
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2142 
// Integer Register Operands
// Integer Register: any register in the int_reg class; the extra match
// rules let specific-register operands (below) degrade to a generic iRegI.
operand iRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);
  match(R0RegI);
  match(R1RegI);
  match(R2RegI);
  match(R3RegI);
  match(R12RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register
operand iRegP() %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(R0RegP);
  match(R1RegP);
  match(R2RegP);
  match(RExceptionRegP);
  match(R8RegP);
  match(R9RegP);
  match(RthreadRegP); // FIXME: move to sp_ptr_RegP?
  match(R12RegP);
  match(LRRegP);

  match(sp_ptr_RegP);
  match(store_ptr_RegP);

  format %{ %}
  interface(REG_INTER);
%}

// GPRs + Rthread + SP
operand sp_ptr_RegP() %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(RegP);
  match(iRegP);
  match(SPRegP); // FIXME: check cost

  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to R0 (first argument / return register)
operand R0RegP() %{
  constraint(ALLOC_IN_RC(R0_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to R1
operand R1RegP() %{
  constraint(ALLOC_IN_RC(R1_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to R2
operand R2RegP() %{
  constraint(ALLOC_IN_RC(R2_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to the exception register
operand RExceptionRegP() %{
  constraint(ALLOC_IN_RC(Rexception_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to the thread register
operand RthreadRegP() %{
  constraint(ALLOC_IN_RC(Rthread_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to IP (intra-procedure scratch)
operand IPRegP() %{
  constraint(ALLOC_IN_RC(IP_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to LR (link register)
operand LRRegP() %{
  constraint(ALLOC_IN_RC(LR_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Int pinned to R0
operand R0RegI() %{
  constraint(ALLOC_IN_RC(R0_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Int pinned to R1
operand R1RegI() %{
  constraint(ALLOC_IN_RC(R1_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Int pinned to R2
operand R2RegI() %{
  constraint(ALLOC_IN_RC(R2_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Int pinned to R3
operand R3RegI() %{
  constraint(ALLOC_IN_RC(R3_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Int pinned to R12
operand R12RegI() %{
  constraint(ALLOC_IN_RC(R12_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}
2286 
// Long Register: a register pair from the long_reg class
operand iRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);
  match(R0R1RegL);
  match(R2R3RegL);
//match(iRegLex);

  format %{ %}
  interface(REG_INTER);
%}

// Long Register restricted to an aligned (even/odd) pair, e.g. for LDRD/STRD
operand iRegLd() %{
  constraint(ALLOC_IN_RC(long_reg_align));
  match(iRegL); // FIXME: allows unaligned R11/R12?

  format %{ %}
  interface(REG_INTER);
%}

// first long arg, or return value
operand R0R1RegL() %{
  constraint(ALLOC_IN_RC(R0R1_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

// second long arg
operand R2R3RegL() %{
  constraint(ALLOC_IN_RC(R2R3_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}
2323 
// Condition Code Flag Register
// The flags operand subtypes below all allocate in the same class; the
// distinct names document which comparison produced the flags so the
// matcher pairs them with the right cmpOp subtype.
operand flagsReg() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr" %}
  interface(REG_INTER);
%}

// Result of compare to 0 (TST)
operand flagsReg_EQNELTGE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_EQNELTGE" %}
  interface(REG_INTER);
%}

// Condition Code Register, unsigned comparisons.
operand flagsRegU() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
#ifdef TODO
  match(RegFlagsP);
#endif

  format %{ "apsr_U" %}
  interface(REG_INTER);
%}

// Condition Code Register, pointer comparisons.
operand flagsRegP() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_P" %}
  interface(REG_INTER);
%}

// Condition Code Register, long comparisons.
operand flagsRegL_LTGE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_L_LTGE" %}
  interface(REG_INTER);
%}

// Condition Code Register, long equality comparisons.
operand flagsRegL_EQNE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_L_EQNE" %}
  interface(REG_INTER);
%}

// Condition Code Register, long LE/GT comparisons.
operand flagsRegL_LEGT() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_L_LEGT" %}
  interface(REG_INTER);
%}

// Condition Code Register, unsigned long comparisons.
operand flagsRegUL_LTGE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_UL_LTGE" %}
  interface(REG_INTER);
%}

// Condition Code Register, unsigned long equality comparisons.
operand flagsRegUL_EQNE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_UL_EQNE" %}
  interface(REG_INTER);
%}

// Condition Code Register, unsigned long LE/GT comparisons.
operand flagsRegUL_LEGT() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_UL_LEGT" %}
  interface(REG_INTER);
%}

// Condition Code Register, floating comparisons, unordered same as "less".
operand flagsRegF() %{
  constraint(ALLOC_IN_RC(float_flags));
  match(RegFlags);

  format %{ "fpscr_F" %}
  interface(REG_INTER);
%}
2420 
// Vectors
// 64-bit (D) vector register
operand vecD() %{
  constraint(ALLOC_IN_RC(actual_dflt_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q) vector register
operand vecX() %{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  format %{ %}
  interface(REG_INTER);
%}

// Double-precision FP register
operand regD() %{
  constraint(ALLOC_IN_RC(actual_dflt_reg));
  match(RegD);
  match(regD_low);

  format %{ %}
  interface(REG_INTER);
%}

// Single-precision FP register
operand regF() %{
  constraint(ALLOC_IN_RC(sflt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double-precision FP register restricted to the low bank
operand regD_low() %{
  constraint(ALLOC_IN_RC(dflt_low_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
2462 
// Special Registers

// Inline-cache register (Ricklass): holds the IC klass / Method* for I2C
operand inline_cache_regP(iRegP reg) %{
  constraint(ALLOC_IN_RC(Ricklass_regP));
  match(reg);
  format %{ %}
  interface(REG_INTER);
%}

// Method oop register (Rmethod): holds the Method* when calling the interpreter
operand interpreter_method_oop_regP(iRegP reg) %{
  constraint(ALLOC_IN_RC(Rmethod_regP));
  match(reg);
  format %{ %}
  interface(REG_INTER);
%}
2479 
2480 
//----------Complex Operands---------------------------------------------------
// Indirect Memory Reference: plain [reg] with no offset.
// index(0xf) encodes PC in the index field, which means "no index register".
operand indirect(sp_ptr_RegP reg) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(reg);

  op_cost(100);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xf); // PC => no index
    scale(0x0);
    disp(0x0);
  %}
%}


// Indirect with Offset in ]-4096, 4096[
operand indOffset12(sp_ptr_RegP reg, immI12 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xf); // PC => no index
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with offset for float load/store
operand indOffsetFP(sp_ptr_RegP reg, immIFP offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xf); // PC => no index
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with Offset for half and double words
operand indOffsetHD(sp_ptr_RegP reg, immIHD offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xf); // PC => no index
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with Offset and Offset+4 in ]-1024, 1024[
operand indOffsetFPx2(sp_ptr_RegP reg, immX10x2 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xf); // PC => no index
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with Offset and Offset+4 in ]-4096, 4096[
operand indOffset12x2(sp_ptr_RegP reg, immI12x2 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xf); // PC => no index
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with Register Index
operand indIndex(iRegP addr, iRegX index) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr index);

  op_cost(100);
  format %{ "[$addr + $index]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Times Scale Plus Index Register
operand indIndexScale(iRegP addr, iRegX index, immU5 scale) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr (LShiftX index scale));

  op_cost(100);
  format %{"[$addr + $index << $scale]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale($scale);
    disp(0x0);
  %}
%}
2602 
2603 // Operands for expressing Control Flow
2604 // NOTE:  Label is a predefined operand which should not be redefined in
2605 //        the AD file.  It is generically handled within the ADLC.
2606 
2607 //----------Conditional Branch Operands----------------------------------------
2608 // Comparison Op  - This is the operation of the comparison, and is limited to
2609 //                  the following set of codes:
2610 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
2611 //
2612 // Other attributes of the comparison, such as unsignedness, are specified
2613 // by the comparison instruction that sets a condition code flags register.
2614 // That result is represented by a flags operand whose subtype is appropriate
2615 // to the unsignedness (etc.) of the comparison.
2616 //
2617 // Later, the instruction which matches both the Comparison Op (a Bool) and
2618 // the flags (produced by the Cmp) specifies the coding of the comparison op
2619 // by matching a specific subtype of Bool operand below, such as cmpOpU.
2620 
// Signed integer comparison.  Hex values are ARM condition-code encodings:
// 0x0=EQ, 0x1=NE, 0xa=GE, 0xb=LT, 0xc=GT, 0xd=LE.
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0);         // EQ
    not_equal(0x1);     // NE
    less(0xb);          // LT
    greater_equal(0xa); // GE
    less_equal(0xd);    // LE
    greater(0xc);       // GT
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}

// integer comparison with 0, signed
// Uses sign-flag conditions: 0x4=MI (negative), 0x5=PL (non-negative).
operand cmpOp0() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0);         // EQ
    not_equal(0x1);     // NE
    less(0x4);          // MI
    greater_equal(0x5); // PL
    less_equal(0xd); // unsupported
    greater(0xc); // unsupported
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}

// Comparison Op, unsigned
// 0x2=CS/HS, 0x3=CC/LO, 0x8=HI, 0x9=LS.
operand cmpOpU() %{
  match(Bool);

  format %{ "u" %}
  interface(COND_INTER) %{
    equal(0x0);         // EQ
    not_equal(0x1);     // NE
    less(0x3);          // LO
    greater_equal(0x2); // HS
    less_equal(0x9);    // LS
    greater(0x8);       // HI
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}

// Comparison Op, pointer (same as unsigned)
operand cmpOpP() %{
  match(Bool);

  format %{ "p" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0x3);
    greater_equal(0x2);
    less_equal(0x9);
    greater(0x8);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}

// Signed long comparison.
operand cmpOpL() %{
  match(Bool);

  format %{ "L" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0xb);
    greater_equal(0xa);
    less_equal(0xd);
    greater(0xc);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}

// Signed long comparison with operands swapped: each condition is the
// mirror of the corresponding cmpOpL condition (LT<->GT, GE<->LE).
operand cmpOpL_commute() %{
  match(Bool);

  format %{ "L" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0xc);
    greater_equal(0xd);
    less_equal(0xa);
    greater(0xb);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}

// Unsigned long comparison.
operand cmpOpUL() %{
  match(Bool);

  format %{ "UL" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0x3);
    greater_equal(0x2);
    less_equal(0x9);
    greater(0x8);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}

// Unsigned long comparison with operands swapped (mirror of cmpOpUL).
operand cmpOpUL_commute() %{
  match(Bool);

  format %{ "UL" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0x8);
    greater_equal(0x9);
    less_equal(0x2);
    greater(0x3);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}
2751 
2752 
2753 //----------OPERAND CLASSES----------------------------------------------------
2754 // Operand Classes are groups of operands that are used to simplify
2755 // instruction definitions by not requiring the AD writer to specify separate
2756 // instructions for every form of operand when the instruction accepts
2757 // multiple operand types with the same basic encoding and format.  The classic
2758 // case of this is memory operands.
2759 
// Memory operand classes, grouped by the addressing modes each access
// kind supports (int/ptr allow scaled index; FP and half/double-word
// accesses have narrower offset encodings).
opclass memoryI ( indirect, indOffset12, indIndex, indIndexScale );
opclass memoryP ( indirect, indOffset12, indIndex, indIndexScale );
opclass memoryF ( indirect, indOffsetFP );
opclass memoryF2 ( indirect, indOffsetFPx2 );
opclass memoryD ( indirect, indOffsetFP );
opclass memoryfp( indirect, indOffsetFP );
opclass memoryB ( indirect, indIndex, indOffsetHD );
opclass memoryS ( indirect, indIndex, indOffsetHD );
opclass memoryL ( indirect, indIndex, indOffsetHD );

opclass memoryScaledI(indIndexScale);
opclass memoryScaledP(indIndexScale);

// when ldrex/strex is used:
opclass memoryex ( indirect );
opclass indIndexMemory( indIndex );
opclass memorylong ( indirect, indOffset12x2 );
opclass memoryvld ( indirect /* , write back mode not implemented */ );
2778 
2779 //----------PIPELINE-----------------------------------------------------------
2780 pipeline %{
2781 
//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;           // Fixed size instructions
  max_instructions_per_bundle = 4;   // Up to 4 instructions per bundle
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 16 bytes

  // List of nop instructions
  nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
%}
2793 
//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
// (IALU is an alias meaning "either integer ALU").
resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);
2802 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.
//
// Notation: "operand : STAGE(read|write)" gives the stage at which the
// operand is consumed or produced (an optional "+n" delays it n cycles);
// "UNIT : STAGE(n)" claims the functional unit for n cycles starting at
// that stage (n defaults to 1).

// Integer ALU reg-reg operation
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg long operation
pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
    instruction_count(2);
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
    IALU  : R;
%}

// Integer ALU reg-reg long dependent operation
pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    cr    : E(write);
    IALU  : R(2);
%}

// Integer ALU reg-imm operation
pipe_class ialu_reg_imm(iRegI dst, iRegI src1) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code
pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
    single_instruction;
    dst   : E(write);
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU zero-reg operation
pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU zero-reg operation with condition code only
pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
    single_instruction;
    cr    : E(write);
    src   : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code only
pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm operation with condition code only
pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg-zero operation with condition code only
pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm-zero operation with condition code only
pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI0 zero) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code, src1 modified
pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
    single_instruction;
    cr    : E(write);
    src1  : E(write);    // src1 is both read and rewritten
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Long compare producing an int result; uses both ALU and branch resources.
pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
    multiple_bundles;
    dst   : E(write)+4;  // result available 4 cycles after normal E
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R(3);
    BR    : R(2);
%}
2923 
// Integer ALU operation
pipe_class ialu_none(iRegI dst) %{
    single_instruction;
    dst   : E(write);
    IALU  : R;
%}

// Integer ALU reg operation
pipe_class ialu_reg(iRegI dst, iRegI src) %{
    single_instruction; may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}

// Integer ALU reg conditional operation
// This instruction has a 1 cycle stall, and cannot execute
// in the same cycle as the instruction setting the condition
// code. We kludge this by pretending to read the condition code
// 1 cycle earlier, and by marking the functional units as busy
// for 2 cycles with the result available 1 cycle later than
// is really the case.
pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
    single_instruction;
    op2_out : C(write);
    op1     : R(read);
    cr      : R(read);       // This is really E, with a 1 cycle stall
    BR      : R(2);
    MS      : R(2);
%}

// Integer ALU reg operation (long source narrowed to int)
pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
    single_instruction; may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}
// Integer ALU reg operation (int source widened to long)
pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
    single_instruction; may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}

// Two integer ALU reg operations
pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
    instruction_count(2);
    dst   : E(write);
    src   : R(read);
    A0    : R;           // one instruction on each ALU
    A1    : R;
%}

// Two integer ALU reg operations
pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
    instruction_count(2); may_have_no_code;
    dst   : E(write);
    src   : R(read);
    A0    : R;
    A1    : R;
%}

// Integer ALU imm operation
pipe_class ialu_imm(iRegI dst) %{
    single_instruction;
    dst   : E(write);
    IALU  : R;
%}

// Integer ALU (negated) imm operation
pipe_class ialu_imm_n(iRegI dst) %{
    single_instruction;
    dst   : E(write);
    IALU  : R;
%}

// Integer ALU reg-reg with carry operation
pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU cc operation
pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
    single_instruction;
    dst   : E(write);
    cc    : R(read);
    IALU  : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    src   : R(read);
    IALU  : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    p     : R(read);
    q     : R(read);
    IALU  : R;
%}

// Integer ALU hi-lo-reg operation
pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    IALU  : R(2);
%}

// Long Constant
pipe_class loadConL( iRegL dst, immL src ) %{
    instruction_count(2); multiple_bundles;
    dst   : E(write)+1;
    IALU  : R(2);
    IALU  : R(2);
%}
3048 
// Pointer Constant
pipe_class loadConP( iRegP dst, immP src ) %{
    instruction_count(0); multiple_bundles;
    fixed_latency(6);
%}

// Polling Address
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
    dst   : E(write);
    IALU  : R;
%}

// Long Constant small
pipe_class loadConLlo( iRegL dst, immL src ) %{
    instruction_count(2);
    dst   : E(write);
    IALU  : R;
    IALU  : R;
%}

// [PHH] This is wrong for 64-bit.  See LdImmF/D.
pipe_class loadConFD(regF dst, immF src, iRegP tmp) %{
    instruction_count(1); multiple_bundles;
    src   : R(read);
    dst   : M(write)+1;
    IALU  : R;
    MS    : E;
%}

// Integer ALU nop operation
pipe_class ialu_nop() %{
    single_instruction;
    IALU  : R;
%}

// Integer ALU nop operation (pinned to ALU A0)
pipe_class ialu_nop_A0() %{
    single_instruction;
    A0    : R;
%}

// Integer ALU nop operation (pinned to ALU A1)
pipe_class ialu_nop_A1() %{
    single_instruction;
    A1    : R;
%}

// Integer Multiply reg-reg operation
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    MS    : R(5);        // multiplier occupies the MS unit for 5 cycles
%}

// Long multiply: longer occupancy and later result than the int multiply.
pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    single_instruction;
    dst   : E(write)+4;
    src1  : R(read);
    src2  : R(read);
    MS    : R(6);
%}

// Integer Divide reg-reg
pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write);
    temp  : E(write);
    src1  : R(read);
    src2  : R(read);
    temp  : R(read);
    MS    : R(38);       // divide is modelled as a 38-cycle MS occupancy
%}

// Long Divide
pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    dst  : E(write)+71;
    src1 : R(read);
    src2 : R(read)+1;
    MS   : R(70);
%}
3131 
// Floating Point Add Float
pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Point Add Double
pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    cr    : R(read);
    FA    : R(2);
    BR    : R(2);
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    cr    : R(read);
    FA    : R(2);
    BR    : R(2);
%}

// Floating Point Multiply Float
pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
%}

// Floating Point Multiply Double
pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
%}

// Floating Point Divide Float
pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
    FDIV  : C(14);       // single-precision divide: 14-cycle FDIV occupancy
%}

// Floating Point Divide Double
pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
    FDIV  : C(17);       // double-precision divide: 17-cycle FDIV occupancy
%}

// Floating Point Move/Negate/Abs Float
pipe_class faddF_reg(regF dst, regF src) %{
    single_instruction;
    dst   : W(write);
    src   : E(read);
    FA    : R(1);
%}

// Floating Point Move/Negate/Abs Double
pipe_class faddD_reg(regD dst, regD src) %{
    single_instruction;
    dst   : W(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert F->D
pipe_class fcvtF2D(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert I->D
// NOTE(review): src is declared regF although the source is an int; pipe_class
// parameter types look like they are used only for scheduling — confirm.
pipe_class fcvtI2D(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert LHi->D
pipe_class fcvtLHi2D(regD dst, regD src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert L->D
pipe_class fcvtL2D(regD dst, iRegL src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert L->F
pipe_class fcvtL2F(regF dst, iRegL src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert D->F
pipe_class fcvtD2F(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert I->L
pipe_class fcvtI2L(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert D->I
pipe_class fcvtD2I(iRegI dst, regD src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert D->L
pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert F->I
pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert F->L
pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert I->F
pipe_class fcvtI2F(regF dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Compare
pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
    single_instruction;
    cr    : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Point Compare
pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
    single_instruction;
    cr    : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Add Nop
pipe_class fadd_nop() %{
    single_instruction;
    FA  : R;
%}
3343 
// Integer Store to Memory
pipe_class istore_mem_reg(memoryI mem, iRegI src) %{
    single_instruction;
    mem   : R(read);
    src   : C(read);
    MS    : R;
%}

// Integer Store to Memory
pipe_class istore_mem_spORreg(memoryI mem, sp_ptr_RegP src) %{
    single_instruction;
    mem   : R(read);
    src   : C(read);
    MS    : R;
%}

// Float Store
// NOTE(review): parameter type "RegF" (capitalized) differs from the "regF"
// spelling used by the FP classes above; adlc pipe_class parameter types
// appear not to be checked against operand definitions — confirm before
// renaming.
pipe_class fstoreF_mem_reg(memoryF mem, RegF src) %{
    single_instruction;
    mem : R(read);
    src : C(read);
    MS  : R;
%}

// Float Store (of the zero constant; no source register to read)
pipe_class fstoreF_mem_zero(memoryF mem, immF0 src) %{
    single_instruction;
    mem : R(read);
    MS  : R;
%}

// Double Store
pipe_class fstoreD_mem_reg(memoryD mem, RegD src) %{
    instruction_count(1);
    mem : R(read);
    src : C(read);
    MS  : R;
%}

// Double Store (of the zero constant)
pipe_class fstoreD_mem_zero(memoryD mem, immD0 src) %{
    single_instruction;
    mem : R(read);
    MS  : R;
%}

// Integer Load (when sign bit propagation not needed)
pipe_class iload_mem(iRegI dst, memoryI mem) %{
    single_instruction;
    mem : R(read);
    dst : C(write);
    MS  : R;
%}

// Integer Load (when sign bit propagation or masking is needed)
pipe_class iload_mask_mem(iRegI dst, memoryI mem) %{
    single_instruction;
    mem : R(read);
    dst : M(write);
    MS  : R;
%}

// Float Load
pipe_class floadF_mem(regF dst, memoryF mem) %{
    single_instruction;
    mem : R(read);
    dst : M(write);
    MS  : R;
%}

// Double Load
pipe_class floadD_mem(regD dst, memoryD mem) %{
    instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case
    mem : R(read);
    dst : M(write);
    MS  : R;
%}

// Memory Nop
pipe_class mem_nop() %{
    single_instruction;
    MS  : R;
%}

pipe_class sethi(iRegP dst, immI src) %{
    single_instruction;
    dst  : E(write);
    IALU : R;
%}

pipe_class loadPollP(iRegP poll) %{
    single_instruction;
    poll : R(read);
    MS   : R;
%}
3439 
// Unconditional branch
pipe_class br(Universe br, label labl) %{
    single_instruction_with_delay_slot;
    BR  : R;
%}

// Conditional branch on the condition-code register
pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
    single_instruction_with_delay_slot;
    cr    : E(read);
    BR    : R;
%}

// Branch on a register compare
pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
    single_instruction_with_delay_slot;
    op1 : E(read);
    BR  : R;
    MS  : R;
%}

pipe_class br_nop() %{
    single_instruction;
    BR  : R;
%}

// Calls force serialization: nothing may be scheduled across them.
pipe_class simple_call(method meth) %{
    instruction_count(2); multiple_bundles; force_serialization;
    fixed_latency(100);
    BR  : R(1);
    MS  : R(1);
    A0  : R(1);
%}

pipe_class compiled_call(method meth) %{
    instruction_count(1); multiple_bundles; force_serialization;
    fixed_latency(100);
    MS  : R(1);
%}

pipe_class call(method meth) %{
    instruction_count(0); multiple_bundles; force_serialization;
    fixed_latency(100);
%}

pipe_class tail_call(Universe ignore, label labl) %{
    single_instruction; has_delay_slot;
    fixed_latency(100);
    BR  : R(1);
    MS  : R(1);
%}

pipe_class ret(Universe ignore) %{
    single_instruction; has_delay_slot;
    BR  : R(1);
    MS  : R(1);
%}

// The real do-nothing guy
pipe_class empty( ) %{
    instruction_count(0);
%}

// Multi-instruction memory sequence (e.g. atomics); serializes the bundle.
pipe_class long_memory_op() %{
    instruction_count(0); multiple_bundles; force_serialization;
    fixed_latency(25);
    MS  : R(1);
%}

// Check-cast
pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
    array : R(read);
    match  : R(read);
    IALU   : R(2);
    BR     : R(2);
    MS     : R;
%}

// Convert FPU flags into +1,0,-1
pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
    src1  : E(read);
    src2  : E(read);
    dst   : E(write);
    FA    : R;
    MS    : R(2);
    BR    : R(2);
%}
3524 
// Compare for p < q, and conditionally add y
pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
    p     : E(read);
    q     : E(read);
    y     : E(read);
    IALU  : R(3);        // fixed: trailing ';' was missing, unlike every other statement
%}
3532 
// Perform a compare, then move conditionally in a branch delay slot.
pipe_class min_max( iRegI src2, iRegI srcdst ) %{
    src2   : E(read);
    srcdst : E(read);
    IALU   : R;
    BR     : R;
%}

// Define the class for the Nop node
define %{
   MachNop = ialu_nop;
%}

// End of the pipeline description.
%}
3547 
//----------INSTRUCTIONS-------------------------------------------------------

//------------Special Nop instructions for bundling - no match rules-----------
// These are emitted only by the bundler to pad a bundle; they are never
// matched from the ideal graph.

// Nop using the A0 functional unit
instruct Nop_A0() %{
  ins_pipe(ialu_nop_A0);
%}

// Nop using the A1 functional unit
instruct Nop_A1( ) %{
  ins_pipe(ialu_nop_A1);
%}

// Nop using the memory functional unit
instruct Nop_MS( ) %{
  ins_pipe(mem_nop);
%}

// Nop using the floating add functional unit
instruct Nop_FA( ) %{
  ins_pipe(fadd_nop);
%}

// Nop using the branch functional unit
instruct Nop_BR( ) %{
  ins_pipe(br_nop);
%}
3575 
//----------Load/Store/Move Instructions---------------------------------------
//----------Load Instructions--------------------------------------------------
// Load Byte (8bit signed)
instruct loadB(iRegI dst, memoryB mem) %{
  match(Set dst (LoadB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRSB   $dst,$mem\t! byte -> int" %}
  ins_encode %{
    __ ldrsb($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Byte (8bit signed) into a Long Register
// Two instructions: load into the low word, then copy its sign bit
// into every bit of the high word via an arithmetic shift right by 31.
instruct loadB2L(iRegL dst, memoryB mem) %{
  match(Set dst (ConvI2L (LoadB mem)));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDRSB $dst.lo,$mem\t! byte -> long\n\t"
            "ASR   $dst.hi,$dst.lo,31" %}
  ins_encode %{
    __ ldrsb($dst$$Register, $mem$$Address);
    // successor() is the high word of the register pair
    __ mov($dst$$Register->successor(), AsmOperand($dst$$Register, asr, 31));
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into an int reg
instruct loadUB(iRegI dst, memoryB mem) %{
  match(Set dst (LoadUB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRB   $dst,$mem\t! ubyte -> int" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into a Long Register
// Zero-extends: the high word is simply cleared.
instruct loadUB2L(iRegL dst, memoryB mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDRB  $dst.lo,$mem\t! ubyte -> long\n\t"
            "MOV   $dst.hi,0" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8 bit UNsigned) with immediate mask into Long Register
instruct loadUB2L_limmI(iRegL dst, memoryB mem, limmIlow8 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));

  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
  size(12);
  format %{ "LDRB  $dst.lo,$mem\t! ubyte -> long\n\t"
            "MOV   $dst.hi,0\n\t"
            "AND  $dst.lo,$dst.lo,$mask" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
    // only the low 8 bits of the mask are relevant after the byte load
    __ andr($dst$$Register, $dst$$Register, limmI_low($mask$$constant, 8));
  %}
  ins_pipe(iload_mem);
%}
3650 
// Load Short (16bit signed)

instruct loadS(iRegI dst, memoryS mem) %{
  match(Set dst (LoadS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRSH   $dst,$mem\t! short" %}
  ins_encode %{
    __ ldrsh($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16 bit signed) to Byte (8 bit signed)
// (LoadS << 24) >> 24 is just a sign-extended byte load, so match the
// whole pattern with a single LDRSB.
instruct loadS2B(iRegI dst, memoryS mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDRSB   $dst,$mem\t! short -> byte" %}
  ins_encode %{
    __ ldrsb($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16bit signed) into a Long Register
instruct loadS2L(iRegL dst, memoryS mem) %{
  match(Set dst (ConvI2L (LoadS mem)));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDRSH $dst.lo,$mem\t! short -> long\n\t"
            "ASR   $dst.hi,$dst.lo,31" %}
  ins_encode %{
    __ ldrsh($dst$$Register, $mem$$Address);
    // replicate the sign bit of the low word into the high word
    __ mov($dst$$Register->successor(), AsmOperand($dst$$Register, asr, 31));
  %}
  ins_pipe(iload_mask_mem);
%}
3693 
// Load Unsigned Short/Char (16bit UNsigned)


instruct loadUS(iRegI dst, memoryS mem) %{
  match(Set dst (LoadUS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRH   $dst,$mem\t! ushort/char" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
// (LoadUS << 24) >> 24 keeps only the sign-extended low byte: one LDRSB.
instruct loadUS2B(iRegI dst, memoryB mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRSB   $dst,$mem\t! ushort -> byte" %}
  ins_encode %{
    __ ldrsb($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) into a Long Register
instruct loadUS2L(iRegL dst, memoryS mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDRH  $dst.lo,$mem\t! short -> long\n\t"
            "MOV   $dst.hi, 0" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
// Masking with 0xFF makes the upper byte irrelevant, so a plain byte load
// (LDRB) replaces the halfword load plus AND.
instruct loadUS2L_immI_255(iRegL dst, memoryB mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDRB  $dst.lo,$mem\t! \n\t"
            "MOV   $dst.hi, 0" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with a immediate mask into a Long Register
instruct loadUS2L_limmI(iRegL dst, memoryS mem, limmI mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  size(12);
  format %{ "LDRH   $dst,$mem\t! ushort/char & mask -> long\n\t"
            "MOV    $dst.hi, 0\n\t"
            "AND    $dst,$dst,$mask" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
    __ andr($dst$$Register, $dst$$Register, $mask$$constant);
  %}
  ins_pipe(iload_mem);
%}
3768 
// Load Integer


instruct loadI(iRegI dst, memoryI mem) %{
  match(Set dst (LoadI mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "ldr_s32 $dst,$mem\t! int" %}
  ins_encode %{
    __ ldr_s32($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer to Byte (8 bit signed)
// (LoadI << 24) >> 24 is a sign-extended byte load: single LDRSB.
instruct loadI2B(iRegI dst, memoryS mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDRSB   $dst,$mem\t! int -> byte" %}
  ins_encode %{
    __ ldrsb($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Byte (8 bit UNsigned)
// LoadI & 0xFF reads only the low byte: single LDRB.
instruct loadI2UB(iRegI dst, memoryB mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDRB   $dst,$mem\t! int -> ubyte" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Short (16 bit signed)
// (LoadI << 16) >> 16 is a sign-extended halfword load: single LDRSH.
instruct loadI2S(iRegI dst, memoryS mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRSH   $dst,$mem\t! int -> short" %}
  ins_encode %{
    __ ldrsh($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Short (16 bit UNsigned)
// LoadI & 0xFFFF reads only the low halfword: single LDRH.
instruct loadI2US(iRegI dst, memoryS mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRH   $dst,$mem\t! int -> ushort/char" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}
3837 
// Load Integer into a Long Register
instruct loadI2L(iRegL dst, memoryI mem) %{
  match(Set dst (ConvI2L (LoadI mem)));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDR   $dst.lo,$mem\t! int -> long\n\t"
            "ASR   $dst.hi,$dst.lo,31\t! int->long" %}
  ins_encode %{
    __ ldr($dst$$Register, $mem$$Address);
    // replicate the sign bit of the low word into the high word
    __ mov($dst$$Register->successor(), AsmOperand($dst$$Register, asr, 31));
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer with mask 0xFF into a Long Register
// The mask makes only the low byte live, so a byte load suffices.
instruct loadI2L_immI_255(iRegL dst, memoryB mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDRB   $dst.lo,$mem\t! int & 0xFF -> long\n\t"
            "MOV    $dst.hi, 0" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with mask 0xFFFF into a Long Register
// The mask makes only the low halfword live, so a halfword load suffices.
instruct loadI2L_immI_65535(iRegL dst, memoryS mem, immI_65535 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDRH   $dst,$mem\t! int & 0xFFFF -> long\n\t"
            "MOV    $dst.hi, 0" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer with a 31-bit immediate mask into a Long Register
// The mask clears the sign bit, so the high word is simply zeroed.
instruct loadI2L_limmU31(iRegL dst, memoryI mem, limmU31 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  size(12);
  format %{ "LDR   $dst.lo,$mem\t! int -> long\n\t"
            "MOV    $dst.hi, 0\n\t"
            "AND   $dst,$dst,$mask" %}

  ins_encode %{
    __ ldr($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
    __ andr($dst$$Register, $dst$$Register, $mask$$constant);
  %}
  ins_pipe(iload_mem);
%}
3900 
3901 // Load Integer with a 31-bit mask into a Long Register
3902 // FIXME: use iRegI mask, remove tmp?
instruct loadI2L_immU31(iRegL dst, memoryI mem, immU31 mask, iRegI tmp) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(TEMP dst, TEMP tmp);

  ins_cost(MEMORY_REF_COST + 4*DEFAULT_COST);
  size(20);
  // Fixed operand order in the LDR format line (was "$mem,$dst", unlike
  // every other load format in this file). The 31-bit mask cannot be
  // encoded as an immediate operand, so it is materialized into $tmp with
  // MOV_SLOW before the AND.
  format %{ "LDR      $dst.lo,$mem\t! int & 31-bit mask -> long\n\t"
            "MOV      $dst.hi, 0\n\t"
            "MOV_SLOW $tmp,$mask\n\t"
            "AND      $dst,$tmp,$dst" %}
  ins_encode %{
    __ ldr($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
    __ mov_slow($tmp$$Register, $mask$$constant);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(iload_mem);
%}
3921 
3922 // Load Unsigned Integer into a Long Register
instruct loadUI2L(iRegL dst, memoryI mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDR   $dst.lo,$mem\t! uint -> long\n\t"
            "MOV   $dst.hi,0" %}
  ins_encode %{
    // (long)i & 0xFFFFFFFF == zero-extension: load the 32-bit value into the
    // low word and clear the high word.
    __ ldr($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
  %}
  ins_pipe(iload_mem);
%}
3936 
3937 // Load Long
3938 
3939 
instruct loadL(iRegLd dst, memoryL mem ) %{
  predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));
  effect(TEMP dst);
  ins_cost(MEMORY_REF_COST);

  size(4);
  // Single-instruction 64-bit load; dst is an iRegLd operand (presumably an
  // even/odd register pair suitable for ldr_64 — confirm operand definition).
  format %{ "ldr_64  $dst,$mem\t! long" %}
  ins_encode %{
    __ ldr_64($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
3953 
instruct loadL_2instr(iRegL dst, memorylong mem ) %{
  predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(8);
  format %{ "LDR    $dst.lo,$mem \t! long order of instrs reversed if $dst.lo == base($mem)\n\t"
            "LDR    $dst.hi,$mem+4 or $mem" %}
  ins_encode %{
    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);

    // If the low destination register is also the address base register,
    // load the high word first so the base is not clobbered before the
    // second load.
    if ($dst$$Register == reg_to_register_object($mem$$base)) {
      __ ldr($dst$$Register->successor(), Amemhi);
      __ ldr($dst$$Register, Amemlo);
    } else {
      __ ldr($dst$$Register, Amemlo);
      __ ldr($dst$$Register->successor(), Amemhi);
    }
  %}
  ins_pipe(iload_mem);
%}
3976 
instruct loadL_volatile(iRegL dst, indirect mem ) %{
  predicate(((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // Format fixed to ARM LDM syntax: the base register comes first, then
  // the destination register list.
  format %{ "LDMIA    $mem,$dst\t! long" %}
  ins_encode %{
    // FIXME: why is ldmia considered atomic?  Should be ldrexd
    RegisterSet set($dst$$Register);
    set = set | reg_to_register_object($dst$$reg + 1);
    __ ldmia(reg_to_register_object($mem$$base), set);
  %}
  ins_pipe(iload_mem);
%}
3992 
instruct loadL_volatile_fp(iRegL dst, memoryD mem ) %{
  predicate(((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));
  ins_cost(MEMORY_REF_COST);

  size(8);
  // Atomic 64-bit load via the FPU: FLDD performs a single 64-bit access,
  // then FMRRD transfers the value to the core register pair.
  // Format fixed: the two lines were missing a "\n\t" separator and the
  // second line ended with a garbled "\n't".
  format %{ "FLDD      S14, $mem\n\t"
            "FMRRD    $dst, S14\t! long" %}
  ins_encode %{
    __ fldd(S14, $mem$$Address);
    __ fmrrd($dst$$Register, $dst$$Register->successor(), S14);
  %}
  ins_pipe(iload_mem);
%}
4007 
instruct loadL_unaligned(iRegL dst, memorylong mem ) %{
  match(Set dst (LoadL_unaligned mem));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDR    $dst.lo,$mem\t! long order of instrs reversed if $dst.lo == base($mem)\n\t"
            "LDR    $dst.hi,$mem+4" %}
  ins_encode %{
    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);

    // Same base-register-overlap handling as loadL_2instr: load the high
    // word first when $dst.lo aliases the address base register.
    if ($dst$$Register == reg_to_register_object($mem$$base)) {
      __ ldr($dst$$Register->successor(), Amemhi);
      __ ldr($dst$$Register, Amemlo);
    } else {
      __ ldr($dst$$Register, Amemlo);
      __ ldr($dst$$Register->successor(), Amemhi);
    }
  %}
  ins_pipe(iload_mem);
%}
4029 
4030 // Load Range
instruct loadRange(iRegI dst, memoryI mem) %{
  match(Set dst (LoadRange mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // Array length (range) is a non-negative 32-bit value; loaded unsigned.
  format %{ "LDR_u32 $dst,$mem\t! range" %}
  ins_encode %{
    __ ldr_u32($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
4042 
4043 // Load Pointer
4044 
4045 
instruct loadP(iRegP dst, memoryP mem) %{
  match(Set dst (LoadP mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  // Plain pointer load; one word on 32-bit ARM.
  format %{ "LDR   $dst,$mem\t! ptr" %}
  ins_encode %{
    __ ldr($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
4057 
#ifdef XXX
// FIXME XXXX
//instruct loadSP(iRegP dst, memoryP mem) %{
// NOTE: dead code — the XXX guard is never defined, so this rule is not
// compiled. Loads a pointer through a scratch register so the result can
// land in the SP register class.
instruct loadSP(SPRegP dst, memoryP mem, iRegP tmp) %{
  match(Set dst (LoadP mem));
  effect(TEMP tmp);
  ins_cost(MEMORY_REF_COST+1);
  size(8);

  format %{ "LDR   $tmp,$mem\t! ptr\n\t"
            "MOV   $dst,$tmp\t! ptr" %}
  ins_encode %{
    __ ldr($tmp$$Register, $mem$$Address);
    __ mov($dst$$Register, $tmp$$Register);
  %}
  ins_pipe(iload_mem);
%}
#endif
4076 
4077 #ifdef _LP64
4078 // Load Compressed Pointer
4079 
4080 // XXX This variant shouldn't be necessary if 6217251 is implemented
// XXX This variant shouldn't be necessary if 6217251 is implemented
instruct loadNoff(iRegN dst, memoryScaledI mem, aimmX off, iRegP tmp) %{
  match(Set dst (LoadN (AddP mem off)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "ldr_u32 $dst,$mem+$off\t! compressed ptr temp=$tmp" %}
  ins_encode %{
    // Fold the constant offset into a temporary base register, then load
    // through an address rebuilt on that new base.
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ ldr_u32($dst$$Register, nmem);
  %}
  ins_pipe(iload_mem);
%}
4096 
instruct loadN(iRegN dst, memoryI mem) %{
  match(Set dst (LoadN mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  // Compressed (narrow) oop load: 32-bit unsigned load, no decode here.
  format %{ "ldr_u32 $dst,$mem\t! compressed ptr" %}
  ins_encode %{
    __ ldr_u32($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
4108 #endif
4109 
4110 // Load Klass Pointer
instruct loadKlass(iRegP dst, memoryI mem) %{
  match(Set dst (LoadKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  // Klass pointer is a full machine word here (not compressed).
  format %{ "LDR   $dst,$mem\t! klass ptr" %}
  ins_encode %{
    __ ldr($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
4122 
4123 #ifdef _LP64
4124 // Load narrow Klass Pointer
instruct loadNKlass(iRegN dst, memoryI mem) %{
  match(Set dst (LoadNKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  // Narrow (compressed) klass pointer: 32-bit unsigned load.
  format %{ "ldr_u32 $dst,$mem\t! compressed klass ptr" %}
  ins_encode %{
    __ ldr_u32($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
4136 #endif
4137 
4138 
instruct loadD(regD dst, memoryD mem) %{
  match(Set dst (LoadD mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // FIXME: needs to be atomic, but  ARMv7 A.R.M. guarantees
  // only LDREXD and STREXD are 64-bit single-copy atomic
  format %{ "FLDD   $dst,$mem" %}
  ins_encode %{
    // Single VFP double-precision load into an FP register.
    __ ldr_double($dst$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(floadD_mem);
%}
4152 
4153 // Load Double - UNaligned
instruct loadD_unaligned(regD_low dst, memoryF2 mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
  size(8);
  format %{ "FLDS    $dst.lo,$mem\t! misaligned double\n"
          "\tFLDS    $dst.hi,$mem+4\t!" %}
  ins_encode %{
    // A misaligned double is loaded as two word-aligned single loads into
    // consecutive single-precision halves of the double register.
    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);
      __ flds($dst$$FloatRegister, Amemlo);
      __ flds($dst$$FloatRegister->successor(), Amemhi);
  %}
  ins_pipe(iload_mem);
%}
4168 
4169 
instruct loadF(regF dst, memoryF mem) %{
  match(Set dst (LoadF mem));

  ins_cost(MEMORY_REF_COST);
  size(4);
  // Single-precision float load into a VFP register.
  format %{ "FLDS    $dst,$mem" %}
  ins_encode %{
    __ ldr_float($dst$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(floadF_mem);
%}
4181 
4182 
// Load Constant
instruct loadConI( iRegI dst, immI src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  // Generic int constant: mov_slow may emit multiple instructions, hence
  // the elevated cost relative to the single-MOV variants below.
  format %{ "MOV_SLOW    $dst, $src" %}
  ins_encode %{
    __ mov_slow($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}
4193 
instruct loadConIMov( iRegI dst, immIMov src ) %{
  match(Set dst src);
  size(4);
  // Constant encodable as an ARM rotated-immediate MOV operand.
  format %{ "MOV    $dst, $src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_imm);
%}
4203 
instruct loadConIMovn( iRegI dst, immIRotn src ) %{
  match(Set dst src);
  size(4);
  // Constant whose bitwise complement is a rotated immediate: use MVN with
  // the complemented value.
  format %{ "MVN    $dst, ~$src" %}
  ins_encode %{
    __ mvn($dst$$Register, ~$src$$constant);
  %}
  ins_pipe(ialu_imm_n);
%}
4213 
instruct loadConI16( iRegI dst, immI16 src ) %{
  match(Set dst src);
  size(4);
  // 16-bit constant loaded with a single MOVW (zero-extends into dst).
  format %{ "MOVW    $dst, $src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_imm_n);
%}
4223 
instruct loadConP(iRegP dst, immP src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "MOV_SLOW    $dst,$src\t!ptr" %}
  ins_encode %{
    // Pointer constants needing relocation (oops and metadata) must go
    // through the relocation-aware movers so the GC / class redefinition
    // machinery can patch them; plain addresses use mov_slow.
    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
    intptr_t val = $src$$constant;
    if (constant_reloc == relocInfo::oop_type) {
      __ mov_oop($dst$$Register, (jobject)val);
    } else if (constant_reloc == relocInfo::metadata_type) {
      __ mov_metadata($dst$$Register, (Metadata*)val);
    } else {
      __ mov_slow($dst$$Register, val);
    }
  %}
  ins_pipe(loadConP);
%}
4241 
4242 
instruct loadConP_poll(iRegP dst, immP_poll src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  // Safepoint polling-page address; no relocation handling needed here.
  format %{ "MOV_SLOW    $dst,$src\t!ptr" %}
  ins_encode %{
      __ mov_slow($dst$$Register, $src$$constant);
  %}
  ins_pipe(loadConP_poll);
%}
4252 
instruct loadConL(iRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 4);
  format %{ "MOV_SLOW   $dst.lo, $src & 0x0FFFFFFFFL \t! long\n\t"
            "MOV_SLOW   $dst.hi, $src >> 32" %}
  ins_encode %{
    // Materialize each half of the 64-bit constant separately; the julong
    // cast makes the high-word shift a logical (unsigned) shift.
    __ mov_slow(reg_to_register_object($dst$$reg), $src$$constant & 0x0FFFFFFFFL);
    __ mov_slow(reg_to_register_object($dst$$reg + 1), ((julong)($src$$constant)) >> 32);
  %}
  ins_pipe(loadConL);
%}
4264 
instruct loadConL16( iRegL dst, immL16 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 2);

  size(8);
  // 16-bit long constant: MOVW the low half, clear the high half.
  // Removed the stray trailing "\n\t" after the last format line.
  format %{ "MOVW    $dst.lo, $src \n\t"
            "MOVW    $dst.hi, 0" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant);
    __ movw($dst$$Register->successor(), 0);
  %}
  ins_pipe(ialu_imm);
%}
4278 
instruct loadConF_imm8(regF dst, imm8F src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "FCONSTS      $dst, $src"%}

  ins_encode %{
    // Float constant representable as a VFP 8-bit encoded immediate.
    __ fconsts($dst$$FloatRegister, Assembler::float_num($src$$constant).imm8());
  %}
  ins_pipe(loadConFD); // FIXME
%}
4291 
4292 
instruct loadConF(regF dst, immF src, iRegI tmp) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 2);
  effect(TEMP tmp);
  size(3*4);

  format %{ "MOV_SLOW  $tmp, $src\n\t"
            "FMSR      $dst, $tmp"%}

  ins_encode %{
    // FIXME revisit once 6961697 is in
    // Reinterpret the float's bit pattern as an int (type punning via a
    // union), build it in a core register, then transfer to the FP register.
    union {
      jfloat f;
      int i;
    } v;
    v.f = $src$$constant;
    __ mov_slow($tmp$$Register, v.i);
    __ fmsr($dst$$FloatRegister, $tmp$$Register);
  %}
  ins_pipe(loadConFD); // FIXME
%}
4314 
instruct loadConD_imm8(regD dst, imm8D src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "FCONSTD      $dst, $src"%}

  ins_encode %{
    // Double constant representable as a VFP 8-bit encoded immediate.
    __ fconstd($dst$$FloatRegister, Assembler::double_num($src$$constant).imm8());
  %}
  ins_pipe(loadConFD); // FIXME
%}
4327 
instruct loadConD(regD dst, immD src, iRegP tmp) %{
  match(Set dst src);
  effect(TEMP tmp);
  ins_cost(MEMORY_REF_COST);
  format %{ "FLDD  $dst, [$constanttablebase + $constantoffset]\t! load from constant table: double=$src" %}

  ins_encode %{
    // General double constant: load from the constant table. If the table
    // offset does not fit FLDD's addressing range, compute the absolute
    // address into $tmp first.
    Register r = $constanttablebase;
    int offset  = $constantoffset($src);
    if (!is_memoryD(offset)) {                // can't use a predicate
                                              // in load constant instructs
      __ add_slow($tmp$$Register, r, offset);
      r = $tmp$$Register;
      offset = 0;
    }
    __ ldr_double($dst$$FloatRegister, Address(r, offset));
  %}
  ins_pipe(loadConFD);
%}
4347 
4348 // Prefetch instructions.
4349 // Must be safe to execute with invalid address (cannot fault).
4350 
instruct prefetchAlloc( memoryP mem ) %{
  match( PrefetchAllocation mem );
  ins_cost(MEMORY_REF_COST);
  size(4);

  // PLDW: prefetch with intent to write; must never fault on a bad address.
  format %{ "PLDW $mem\t! Prefetch allocation" %}
  ins_encode %{
    __ pldw($mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
4362 
4363 //----------Store Instructions-------------------------------------------------
4364 // Store Byte
instruct storeB(memoryB mem, store_RegI src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // Store low byte of src.
  format %{ "STRB    $src,$mem\t! byte" %}
  ins_encode %{
    __ strb($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
4376 
instruct storeCM(memoryB mem, store_RegI src) %{
  match(Set mem (StoreCM mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // Card-table mark: a plain byte store, kept as a distinct node so the
  // compiler can order it relative to the oop store it covers.
  format %{ "STRB    $src,$mem\t! CMS card-mark byte" %}
  ins_encode %{
    __ strb($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
4388 
4389 // Store Char/Short
4390 
4391 
instruct storeC(memoryS mem, store_RegI src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // Store low halfword of src (char/short).
  format %{ "STRH    $src,$mem\t! short" %}
  ins_encode %{
    __ strh($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
4403 
4404 // Store Integer
4405 
4406 
instruct storeI(memoryI mem, store_RegI src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // 32-bit integer store.
  format %{ "str_32 $src,$mem" %}
  ins_encode %{
    __ str_32($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
4418 
4419 // Store Long
4420 
4421 
instruct storeL(memoryL mem, store_RegLd src) %{
  predicate(!((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // Single-instruction 64-bit store (non-atomic path).
  // Removed the stray trailing "\n\t" from the single-line format.
  format %{ "str_64  $src,$mem\t! long" %}

  ins_encode %{
    __ str_64($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
4435 
instruct storeL_2instr(memorylong mem, iRegL src) %{
  predicate(!((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(8);
  format %{ "STR    $src.lo,$mem\t! long\n\t"
            "STR    $src.hi,$mem+4" %}

  ins_encode %{
    // Two word stores; unlike the load case no ordering hazard exists,
    // since stores do not clobber the base register.
    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);
    __ str($src$$Register, Amemlo);
    __ str($src$$Register->successor(), Amemhi);
  %}
  ins_pipe(istore_mem_reg);
%}
4453 
instruct storeL_volatile(indirect mem, iRegL src) %{
  predicate(((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  // Format fixed to ARM STM syntax: the base register comes first, then
  // the source register list.
  format %{ "STMIA    $mem,$src\t! long" %}
  ins_encode %{
    // FIXME: why is stmia considered atomic?  Should be strexd
    RegisterSet set($src$$Register);
    set = set | reg_to_register_object($src$$reg + 1);
    __ stmia(reg_to_register_object($mem$$base), set);
  %}
  ins_pipe(istore_mem_reg);
%}
4468 
instruct storeL_volatile_fp(memoryD mem, iRegL src) %{
  predicate(((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);
  size(8);
  format %{ "FMDRR    S14, $src\t! long \n\t"
            "FSTD     S14, $mem" %}
  ins_encode %{
    // Atomic 64-bit store via the FPU: move the core register pair into a
    // double register with FMDRR, then FSTD performs a single 64-bit store.
    __ fmdrr(S14, $src$$Register, $src$$Register->successor());
    __ fstd(S14, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
4482 
#ifdef XXX
// Move SP Pointer
//instruct movSP(sp_ptr_RegP dst, SPRegP src) %{
//instruct movSP(iRegP dst, SPRegP src) %{
// NOTE: dead code — the XXX guard is never defined, so this rule is not
// compiled. The assert(false) below confirms it was never expected to run.
instruct movSP(store_ptr_RegP dst, SPRegP src) %{
  match(Set dst src);
//predicate(!_kids[1]->_leaf->is_Proj() || _kids[1]->_leaf->as_Proj()->_con == TypeFunc::FramePtr);
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "MOV    $dst,$src\t! SP ptr\n\t" %}
  ins_encode %{
    assert(false, "XXX1 got here");
    __ mov($dst$$Register, SP);
    __ mov($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
#endif
4502 
4503 
4504 // Store Pointer
4505 
4506 
instruct storeP(memoryP mem, store_ptr_RegP src) %{
  match(Set mem (StoreP mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  // Plain pointer store; one word on 32-bit ARM.
  format %{ "STR    $src,$mem\t! ptr" %}
  ins_encode %{
    __ str($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_spORreg);
%}
4518 
4519 
4520 #ifdef _LP64
4521 // Store Compressed Pointer
4522 
4523 
instruct storeN(memoryI mem, store_RegN src) %{
  match(Set mem (StoreN mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  // Compressed (narrow) oop store: 32-bit store of the already-encoded value.
  format %{ "str_32 $src,$mem\t! compressed ptr" %}
  ins_encode %{
    __ str_32($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
4535 
4536 
4537 // Store Compressed Klass Pointer
// Store Compressed Klass Pointer
instruct storeNKlass(memoryI mem, store_RegN src) %{
  match(Set mem (StoreNKlass mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  // 32-bit store of an already-encoded narrow klass pointer.
  format %{ "str_32 $src,$mem\t! compressed klass ptr" %}
  ins_encode %{
    __ str_32($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
4549 #endif
4550 
4551 // Store Double
4552 
4553 
instruct storeD(memoryD mem, regD src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // FIXME: needs to be atomic, but  ARMv7 A.R.M. guarantees
  // only LDREXD and STREXD are 64-bit single-copy atomic
  format %{ "FSTD   $src,$mem" %}
  ins_encode %{
    // Single VFP double-precision store from an FP register.
    __ str_double($src$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_reg);
%}
4567 
4568 
4569 // Store Float
4570 
4571 
instruct storeF( memoryF mem, regF src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // Single-precision float store from a VFP register.
  format %{ "FSTS    $src,$mem" %}
  ins_encode %{
    __ str_float($src$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(fstoreF_mem_reg);
%}
4583 
4584 
4585 //----------MemBar Instructions-----------------------------------------------
4586 // Memory barrier flavors
4587 
4588 // pattern-match out unnecessary membars
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-storestore" %}
  ins_encode %{
    // Order prior stores before subsequent stores (StoreStore only).
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore), noreg);
  %}
  ins_pipe(long_memory_op);
%}
4600 
instruct membar_acquire() %{
  match(MemBarAcquire);
  match(LoadFence);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-acquire" %}
  ins_encode %{
    // Acquire semantics: prior loads ordered before subsequent loads and
    // stores (LoadLoad | LoadStore).
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), noreg);
  %}
  ins_pipe(long_memory_op);
%}
4613 
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  // Empty encoding: the CAS performed by the preceding FastLock already
  // supplies the required ordering.
  format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}
4623 
instruct membar_release() %{
  match(MemBarRelease);
  match(StoreFence);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-release" %}
  ins_encode %{
    // Release semantics: prior stores and loads ordered before subsequent
    // stores (StoreStore | LoadStore).
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), noreg);
  %}
  ins_pipe(long_memory_op);
%}
4636 
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  // Empty encoding: the CAS performed by the following FastUnlock already
  // supplies the required ordering.
  format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}
4646 
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    // StoreLoad is the one ordering not implied by the acquire/release
    // fences above, so a volatile barrier must emit it explicitly.
    __ membar(MacroAssembler::StoreLoad, noreg);
  %}
  ins_pipe(long_memory_op);
%}
4658 
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  // Matcher has proven a following barrier already covers this one, so it
  // can be elided entirely.
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}
4669 
4670 //----------Register Move Instructions-----------------------------------------
4671 // instruct roundDouble_nop(regD dst) %{
4672 //   match(Set dst (RoundDouble dst));
4673 //   ins_pipe(empty);
4674 // %}
4675 
4676 
4677 // instruct roundFloat_nop(regF dst) %{
4678 //   match(Set dst (RoundFloat dst));
4679 //   ins_pipe(empty);
4680 // %}
4681 
4682 
4683 
4684 // Cast Index to Pointer for unsafe natives
instruct castX2P(iRegX src, iRegP dst) %{
  match(Set dst (CastX2P src));

  format %{ "MOV    $dst,$src\t! IntX->Ptr if $dst != $src" %}
  ins_encode %{
    // Pure reinterpretation: emit a MOV only when the allocator picked
    // different registers.
    if ($dst$$Register !=  $src$$Register) {
      __ mov($dst$$Register, $src$$Register);
    }
  %}
  ins_pipe(ialu_reg);
%}
4696 
4697 // Cast Pointer to Index for unsafe natives
instruct castP2X(iRegP src, iRegX dst) %{
  match(Set dst (CastP2X src));

  format %{ "MOV    $dst,$src\t! Ptr->IntX if $dst != $src" %}
  ins_encode %{
    // Pure reinterpretation: emit a MOV only when the allocator picked
    // different registers.
    if ($dst$$Register !=  $src$$Register) {
      __ mov($dst$$Register, $src$$Register);
    }
  %}
  ins_pipe(ialu_reg);
%}
4709 
4710 //----------Conditional Move---------------------------------------------------
4711 // Conditional move
// Predicated register move on pointer-compare flags: dst := src when cmp holds.
instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src\t! int" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
4722 
4723 
// Predicated immediate move on pointer-compare flags (rotated-immediate form).
instruct cmovIP_immMov(cmpOpP cmp, flagsRegP pcc, iRegI dst, immIMov src) %{
  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
4734 
// Predicated 16-bit immediate move (MOVW) on pointer-compare flags.
instruct cmovIP_imm16(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOVw$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
4745 
// Predicated register move on signed int-compare flags.
instruct cmovI_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
4756 
4757 
// Predicated immediate move on signed int-compare flags.
instruct cmovI_immMov(cmpOp cmp, flagsReg icc, iRegI dst, immIMov src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
4768 
// Predicated 16-bit immediate move (MOVW) on signed int-compare flags.
instruct cmovII_imm16(cmpOp cmp, flagsReg icc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOVw$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
4779 
// Predicated register move on a compare-with-zero flags register; the
// predicate restricts this rule to the conditions (eq/ne/lt/ge) that the
// flagsReg_EQNELTGE register class can represent.
instruct cmovII_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
4794 
// Immediate variant of cmovII_reg_EQNELTGE (eq/ne/lt/ge on compare-with-zero).
instruct cmovII_immMov_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegI dst, immIMov src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
4809 
// MOVW variant of cmovII_reg_EQNELTGE (eq/ne/lt/ge on compare-with-zero).
instruct cmovII_imm16_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(140);
  size(4);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
4824 
// Predicated register move on unsigned int-compare flags.
instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
4835 
// Predicated immediate move on unsigned int-compare flags.
instruct cmovIIu_immMov(cmpOpU cmp, flagsRegU icc, iRegI dst, immIMov src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
4846 
// Predicated 16-bit immediate move (MOVW) on unsigned int-compare flags.
instruct cmovIIu_imm16(cmpOpU cmp, flagsRegU icc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
4857 
4858 // Conditional move
// Predicated pointer-register move on pointer-compare flags.
instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
4869 
// Predicated move of the null pointer constant (immP0) on pointer-compare flags.
instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
4880 
4881 // This instruction also works with CmpN so we don't need cmovPN_reg.
// Predicated pointer move on signed int-compare flags.
instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
4893 
// Predicated pointer move on compare-with-zero flags; restricted by the
// predicate to the eq/ne/lt/ge conditions that flagsReg_EQNELTGE supports.
instruct cmovPI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
4909 
4910 instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
4911   match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
4912   ins_cost(150);
4913 
4914   size(4);
4915   format %{ "MOV$cmp  $dst,$src\t! ptr" %}
4916   ins_encode %{
4917     __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
4918   %}
4919   ins_pipe(ialu_reg);
4920 %}
4921 
4922 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
4923   match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
4924   ins_cost(140);
4925 
4926   size(4);
4927   format %{ "MOV$cmp  $dst,$src\t! ptr" %}
4928   ins_encode %{
4929     __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
4930   %}
4931   ins_pipe(ialu_imm);
4932 %}
4933 
4934 instruct cmovPI_imm_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegP dst, immP0 src) %{
4935   match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
4936   predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
4937             _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
4938             _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
4939             _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
4940   ins_cost(140);
4941 
4942   size(4);
4943   format %{ "MOV$cmp  $dst,$src\t! ptr" %}
4944   ins_encode %{
4945     __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
4946   %}
4947   ins_pipe(ialu_imm);
4948 %}
4949 
4950 instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
4951   match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
4952   ins_cost(140);
4953 
4954   size(4);
4955   format %{ "MOV$cmp  $dst,$src\t! ptr" %}
4956   ins_encode %{
4957     __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
4958   %}
4959   ins_pipe(ialu_imm);
4960 %}
4961 
4962 
// Conditional move
// Conditional single-float register copy (VFP FCPYS) under pointer flags.
instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

// Conditional single-float copy under signed int-compare flags.
instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

// Variant for flagsReg_EQNELTGE: predicate limits matching to eq/ne/lt/ge.
instruct cmovFI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(150);

  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

// Conditional single-float copy under unsigned int-compare flags.
instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}
5014 
// Conditional move
// Conditional double-float register copy (VFP FCPYD) under pointer flags.
instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_double_move);
%}

// Conditional double-float copy under signed int-compare flags.
instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_double_move);
%}
5038 
// Conditional double-float copy for flagsReg_EQNELTGE: the predicate limits
// matching to the eq/ne/lt/ge boolean tests.  (Predicate reformatted to one
// test per line, matching every sibling *_EQNELTGE rule in this file.)
instruct cmovDI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(150);

  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_double_move);
%}
5051 
// Conditional double-float copy under unsigned int-compare flags.
instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_double_move);
%}
5063 
// Conditional move
// Conditional long move under pointer flags: two predicated MOVs, one per
// 32-bit half (the high half lives in the successor register).
instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Conditional long move of a rotate-encodable immediate (immLlowRot): the
// low half gets the constant, the high half is zeroed.
instruct cmovLP_immRot(cmpOpP cmp, flagsRegP pcc, iRegL dst, immLlowRot src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
5094 
// Conditional long move of a 16-bit immediate under pointer flags: MOVW into
// the low half, predicated MOV #0 into the high half.
instruct cmovLP_imm16(cmpOpP cmp, flagsRegP pcc, iRegL dst, immL16 src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);

  size(8);
  // The low-half encoder emits movw(), so the format says MOVW (it previously
  // printed MOV), consistent with cmovIIu_imm16.  Debug-output change only.
  format %{ "MOVW$cmp $dst.lo,$src\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
5108 
// Conditional long move (both halves) under signed int-compare flags.
instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// Variant for flagsReg_EQNELTGE: predicate limits matching to eq/ne/lt/ge.
instruct cmovLI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(150);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Conditional long move of a rotate-encodable immediate: constant into the
// low half, zero into the high half.
instruct cmovLI_immRot(cmpOp cmp, flagsReg icc, iRegL dst, immLlowRot src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Rotate-immediate variant for flagsReg_EQNELTGE; predicate as above.
instruct cmovLI_immRot_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegL dst, immLlowRot src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(140);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
5176 
// Conditional long move of a 16-bit immediate: MOVW of the constant into the
// low half and MOVW #0 into the high half, both predicated.
instruct cmovLI_imm16(cmpOp cmp, flagsReg icc, iRegL dst, immL16 src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(8);
  // Both encoder calls are movw(), so the format says MOVW for both halves
  // (it previously printed MOV).  Debug-output change only.
  format %{ "MOVW$cmp $dst.lo,$src\t! long\n\t"
            "MOVW$cmp $dst.hi,0" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    __ movw($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
5190 
// 16-bit-immediate variant for flagsReg_EQNELTGE: the predicate limits
// matching to the eq/ne/lt/ge boolean tests.
instruct cmovLI_imm16_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegL dst, immL16 src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(140);

  size(8);
  // Both encoder calls are movw(), so the format says MOVW for both halves
  // (it previously printed MOV).  Debug-output change only.
  format %{ "MOVW$cmp $dst.lo,$src\t! long\n\t"
            "MOVW$cmp $dst.hi,0" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    __ movw($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
5208 
// Conditional long move (both halves) under unsigned int-compare flags.
instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
5222 
5223 
//----------OS and Locking Instructions----------------------------------------

// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
// ThreadLocal lookup: the current thread already lives in the dedicated
// Rthread register (RthreadRegP), so this emits no code at all.
instruct tlsLoadP(RthreadRegP dst) %{
  match(Set dst (ThreadLocal));

  size(0);
  ins_cost(0);
  format %{ "! TLS is in $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(ialu_none);
%}

// CheckCastPP is a no-op at the machine level: it only narrows the compiler's
// type information, so no instruction is emitted.
instruct checkCastPP( iRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "! checkcastPP of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(empty);
%}


// CastPP is likewise a compiler-only type adjustment; empty encoding.
instruct castPP( iRegP dst ) %{
  match(Set dst (CastPP dst));
  format %{ "! castPP of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(empty);
%}

// CastII is likewise a compiler-only type adjustment; empty encoding.
instruct castII( iRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "! castII of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe(empty);
%}
5263 
//----------Arithmetic Instructions--------------------------------------------
// Addition Instructions
// Register Addition
// Plain int register-register add.
instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "add_32 $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Fused add + left shift: dst = src3 + (src1 << src2), shift amount in a
// register, using the ARM shifted-operand form (one instruction).
instruct addshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AddI (LShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1<<$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}


// Fused add + left shift with a 5-bit immediate shift amount (immU5).
instruct addshlI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (AddI (LShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1<<$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Fused add + arithmetic right shift (asr), shift amount in a register.
instruct addsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AddI (RShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1>>$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Fused add + arithmetic right shift with an immediate shift amount.
instruct addsarI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (AddI (RShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1>>$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Fused add + logical right shift (lsr), shift amount in a register.
instruct addshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AddI (URShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1>>>$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Fused add + logical right shift with an immediate shift amount.
instruct addshrI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (AddI (URShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1>>>$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Immediate Addition
// Int add of an arithmetic-encodable immediate (aimmI).
instruct addI_reg_aimmI(iRegI dst, iRegI src1, aimmI src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "add_32 $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}

// Pointer Register Addition
// Pointer + offset-register add.
instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{
  match(Set dst (AddP src1 src2));

  size(4);
  format %{ "ADD    $dst,$src1,$src2\t! ptr" %}
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
5368 
5369 
5370 // shifted iRegX operand
5371 operand shiftedX(iRegX src2, shimmX src3) %{
5372 //constraint(ALLOC_IN_RC(sp_ptr_reg));
5373   match(LShiftX src2 src3);
5374 
5375   op_cost(1);
5376   format %{ "$src2 << $src3" %}
5377   interface(MEMORY_INTER) %{
5378     base($src2);
5379     index(0xff);
5380     scale($src3);
5381     disp(0x0);
5382   %}
5383 %}
5384 
5385 instruct addshlP_reg_reg_imm(iRegP dst, iRegP src1, shiftedX src2) %{
5386   match(Set dst (AddP src1 src2));
5387 
5388   ins_cost(DEFAULT_COST * 3/2);
5389   size(4);
5390   format %{ "ADD    $dst,$src1,$src2\t! ptr" %}
5391   ins_encode %{
5392     Register base = reg_to_register_object($src2$$base);
5393     __ add($dst$$Register, $src1$$Register, AsmOperand(base, lsl, $src2$$scale));
5394   %}
5395   ins_pipe(ialu_reg_reg);
5396 %}
5397 
5398 // Pointer Immediate Addition
5399 instruct addP_reg_aimmX(iRegP dst, iRegP src1, aimmX src2) %{
5400   match(Set dst (AddP src1 src2));
5401 
5402   size(4);
5403   format %{ "ADD    $dst,$src1,$src2\t! ptr" %}
5404   ins_encode %{
5405     __ add($dst$$Register, $src1$$Register, $src2$$constant);
5406   %}
5407   ins_pipe(ialu_reg_imm);
5408 %}
5409 
// Long Addition
// 64-bit add as ADDS (low halves, sets carry) + ADC (high halves, consumes
// carry); the flags register is clobbered, hence KILL ccr.
instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2, flagsReg ccr) %{
  match(Set dst (AddL src1 src2));
  effect(KILL ccr);
  size(8);
  format %{ "ADDS    $dst.lo,$src1.lo,$src2.lo\t! long\n\t"
            "ADC     $dst.hi,$src1.hi,$src2.hi" %}
  ins_encode %{
    __ adds($dst$$Register, $src1$$Register, $src2$$Register);
    __ adc($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
  %}
  ins_pipe(ialu_reg_reg);
%}

// TODO

// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// 64-bit add of a rotate-encodable immediate: constant added to the low
// half, carry propagated into the high half via ADC with 0.
instruct addL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con, flagsReg ccr) %{
  match(Set dst (AddL src1 con));
  effect(KILL ccr);
  size(8);
  format %{ "ADDS    $dst.lo,$src1.lo,$con\t! long\n\t"
            "ADC     $dst.hi,$src1.hi,0" %}
  ins_encode %{
    __ adds($dst$$Register, $src1$$Register, $con$$constant);
    __ adc($dst$$Register->successor(), $src1$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_imm);
%}
5440 
//----------Conditional_store--------------------------------------------------
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flags (EQ) on success.

// LoadP-locked.
// Load-exclusive of a pointer; pairs with the STREX in storePConditional.
instruct loadPLocked(iRegP dst, memoryex mem) %{
  match(Set dst (LoadPLocked mem));
  size(4);
  format %{ "LDREX  $dst,$mem" %}
  ins_encode %{
    __ ldrex($dst$$Register,$mem$$Address);
  %}
  ins_pipe(iload_mem);
%}

// Store-exclusive of the new heap top; STREX writes 0 into tmp on success,
// and the CMP against 0 turns that into the EQ flag callers test.
instruct storePConditional( memoryex heap_top_ptr, iRegP oldval, iRegP newval, iRegI tmp, flagsRegP pcc ) %{
  predicate(_kids[1]->_kids[0]->_leaf->Opcode() == Op_LoadPLocked); // only works in conjunction with a LoadPLocked node
  match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
  effect( TEMP tmp );
  size(8);
  format %{ "STREX  $tmp,$newval,$heap_top_ptr\n\t"
            "CMP    $tmp, 0" %}
  ins_encode %{
    __ strex($tmp$$Register, $newval$$Register, $heap_top_ptr$$Address);
    __ cmp($tmp$$Register, 0);
  %}
  ins_pipe( long_memory_op );
%}

// Conditional-store of an intx value.
// LDREX/STREX retry loop: stores newval only if [mem]==oldval, retrying on
// exclusive-store failure; final TEQ sets the flags the caller tests.
instruct storeXConditional( memoryex mem, iRegX oldval, iRegX newval, iRegX tmp, flagsReg icc ) %{
  match(Set icc (StoreIConditional mem (Binary oldval newval)));
  effect( TEMP tmp );
  size(28);
  format %{ "loop: \n\t"
            "LDREX    $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem], DOESN'T set $newval=[$mem] in any case\n\t"
            "XORS     $tmp,$tmp, $oldval\n\t"
            "STREX.eq $tmp, $newval, $mem\n\t"
            "CMP.eq   $tmp, 1 \n\t"
            "B.eq     loop \n\t"
            "TEQ      $tmp, 0\n\t"
            "membar   LoadStore|LoadLoad" %}
  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($tmp$$Register, $mem$$Address);
    // EORS leaves zero (EQ) iff the loaded value matches oldval.
    __ eors($tmp$$Register, $tmp$$Register, $oldval$$Register);
    __ strex($tmp$$Register, $newval$$Register, $mem$$Address, eq);
    // STREX writes 1 on failure; retry the whole sequence in that case.
    __ cmp($tmp$$Register, 1, eq);
    __ b(loop, eq);
    __ teq($tmp$$Register, 0);
    // used by biased locking only. Requires a membar.
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadStore | MacroAssembler::LoadLoad), noreg);
  %}
  ins_pipe( long_memory_op );
%}
5498 
5499 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
5500 
// Long CAS returning a boolean: LDREXD/STREXD retry loop.  On a value match
// the EORS of the STREXD status with 1 yields res=1 (success) or retries;
// on mismatch tmp is forced to 0 and res=0.  ccr is killed by CMP/EORS.
instruct compareAndSwapL_bool(memoryex mem, iRegL oldval, iRegLd newval, iRegI res, iRegLd tmp, flagsReg ccr ) %{
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  effect( KILL ccr, TEMP tmp);
  size(32);
  // Format fixed from "XORS.eq": the encoder emits eors() and the ARM
  // mnemonic is EORS, as compareAndSwapP_bool already prints.
  format %{ "loop: \n\t"
            "LDREXD   $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
            "CMP      $tmp.lo, $oldval.lo\n\t"
            "CMP.eq   $tmp.hi, $oldval.hi\n\t"
            "STREXD.eq $tmp, $newval, $mem\n\t"
            "MOV.ne   $tmp, 0 \n\t"
            "EORS.eq  $tmp,$tmp, 1 \n\t"
            "B.eq     loop \n\t"
            "MOV      $res, $tmp" %}
  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrexd($tmp$$Register, $mem$$Address);
    __ cmp($tmp$$Register, $oldval$$Register);
    __ cmp($tmp$$Register->successor(), $oldval$$Register->successor(), eq);
    __ strexd($tmp$$Register, $newval$$Register, $mem$$Address, eq);
    __ mov($tmp$$Register, 0, ne);
    // STREXD status: 0 = stored, 1 = lost exclusivity; EOR with 1 maps
    // success->1 (NE, fall through) and failure->0 (EQ, branch to retry).
    __ eors($tmp$$Register, $tmp$$Register, 1, eq);
    __ b(loop, eq);
    __ mov($res$$Register, $tmp$$Register);
  %}
  ins_pipe( long_memory_op );
%}
5528 
5529 
// Int CAS returning a boolean: LDREX/STREX retry loop.  Result is 1 when the
// store succeeded, 0 when the compare failed.  ccr is killed by CMP/EORS.
instruct compareAndSwapI_bool(memoryex mem, iRegI oldval, iRegI newval, iRegI res, iRegI tmp, flagsReg ccr ) %{
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  effect( KILL ccr, TEMP tmp);
  size(28);
  // Format fixed from "XORS.eq": the encoder emits eors() and the ARM
  // mnemonic is EORS, as compareAndSwapP_bool already prints.
  format %{ "loop: \n\t"
            "LDREX    $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
            "CMP      $tmp, $oldval\n\t"
            "STREX.eq $tmp, $newval, $mem\n\t"
            "MOV.ne   $tmp, 0 \n\t"
            "EORS.eq  $tmp,$tmp, 1 \n\t"
            "B.eq     loop \n\t"
            "MOV      $res, $tmp" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($tmp$$Register,$mem$$Address);
    __ cmp($tmp$$Register, $oldval$$Register);
    __ strex($tmp$$Register, $newval$$Register, $mem$$Address, eq);
    __ mov($tmp$$Register, 0, ne);
    // STREX status: 0 = stored, 1 = lost exclusivity; EOR with 1 maps
    // success->1 (fall through) and failure->0 (EQ, branch to retry).
    __ eors($tmp$$Register, $tmp$$Register, 1, eq);
    __ b(loop, eq);
    __ mov($res$$Register, $tmp$$Register);
  %}
  ins_pipe( long_memory_op );
%}
5556 
// Pointer CAS returning a boolean: same LDREX/STREX retry loop as the int
// variant above; res = 1 on successful store, 0 on compare failure.
instruct compareAndSwapP_bool(memoryex mem, iRegP oldval, iRegP newval, iRegI res, iRegI tmp, flagsReg ccr ) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  effect( KILL ccr, TEMP tmp);
  size(28);
  format %{ "loop: \n\t"
            "LDREX    $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
            "CMP      $tmp, $oldval\n\t"
            "STREX.eq $tmp, $newval, $mem\n\t"
            "MOV.ne   $tmp, 0 \n\t"
            "EORS.eq  $tmp,$tmp, 1 \n\t"
            "B.eq     loop \n\t"
            "MOV      $res, $tmp" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($tmp$$Register,$mem$$Address);
    __ cmp($tmp$$Register, $oldval$$Register);
    __ strex($tmp$$Register, $newval$$Register, $mem$$Address, eq);
    __ mov($tmp$$Register, 0, ne);
    // STREX status: 0 = stored, 1 = lost exclusivity; EOR with 1 maps
    // success->1 (fall through) and failure->0 (EQ, branch to retry).
    __ eors($tmp$$Register, $tmp$$Register, 1, eq);
    __ b(loop, eq);
    __ mov($res$$Register, $tmp$$Register);
  %}
  ins_pipe( long_memory_op );
%}
5583 
// Atomic int add of an immediate, result unused (predicate checks
// result_not_used): LDREX/add/STREX retry loop, no old value returned.
instruct xaddI_aimmI_no_res(memoryex mem, aimmI add, Universe dummy, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem add));
  effect(KILL ccr, TEMP tmp1, TEMP tmp2);
  size(20);
  format %{ "loop: \n\t"
            "LDREX    $tmp1, $mem\n\t"
            "ADD      $tmp1, $tmp1, $add\n\t"
            "STREX    $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($tmp1$$Register,$mem$$Address);
    __ add($tmp1$$Register, $tmp1$$Register, $add$$constant);
    __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    // STREX writes nonzero on failure; retry until the store sticks.
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}

// Atomic int add of a register value, result unused.
instruct xaddI_reg_no_res(memoryex mem, iRegI add, Universe dummy, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem add));
  effect(KILL ccr, TEMP tmp1, TEMP tmp2);
  size(20);
  format %{ "loop: \n\t"
            "LDREX    $tmp1, $mem\n\t"
            "ADD      $tmp1, $tmp1, $add\n\t"
            "STREX    $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($tmp1$$Register,$mem$$Address);
    __ add($tmp1$$Register, $tmp1$$Register, $add$$Register);
    __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}

// Atomic get-and-add of an immediate: res receives the value loaded before
// the add (the LDREX target), the sum goes back to memory via STREX.
instruct xaddI_aimmI(memoryex mem, aimmI add, iRegI res, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
  match(Set res (GetAndAddI mem add));
  effect(KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
  size(20);
  format %{ "loop: \n\t"
            "LDREX    $res, $mem\n\t"
            "ADD      $tmp1, $res, $add\n\t"
            "STREX    $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($res$$Register,$mem$$Address);
    __ add($tmp1$$Register, $res$$Register, $add$$constant);
    __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}

// Atomic get-and-add of a register value; res receives the pre-add value.
instruct xaddI_reg(memoryex mem, iRegI add, iRegI res, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
  match(Set res (GetAndAddI mem add));
  effect(KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
  size(20);
  format %{ "loop: \n\t"
            "LDREX    $res, $mem\n\t"
            "ADD      $tmp1, $res, $add\n\t"
            "STREX    $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($res$$Register,$mem$$Address);
    __ add($tmp1$$Register, $res$$Register, $add$$Register);
    __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
5677 
// Atomic long add of a register value, result unused: LDREXD/ADDS+ADC/STREXD
// retry loop (64-bit add carries through the successor register).
instruct xaddL_reg_no_res(memoryex mem, iRegL add, Universe dummy, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem add));
  effect( KILL ccr, TEMP tmp1, TEMP tmp2);
  size(24);
  format %{ "loop: \n\t"
            "LDREXD   $tmp1, $mem\n\t"
            "ADDS     $tmp1.lo, $tmp1.lo, $add.lo\n\t"
            "ADC      $tmp1.hi, $tmp1.hi, $add.hi\n\t"
            "STREXD   $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrexd($tmp1$$Register, $mem$$Address);
    __ adds($tmp1$$Register, $tmp1$$Register, $add$$Register);
    __ adc($tmp1$$Register->successor(), $tmp1$$Register->successor(), $add$$Register->successor());
    __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    // STREXD writes nonzero on failure; retry until the store sticks.
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}

// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Atomic long add of a rotate-encodable immediate, result unused: constant
// added to the low half, carry propagated with ADC 0.
instruct xaddL_immRot_no_res(memoryex mem, immLlowRot add, Universe dummy, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem add));
  effect( KILL ccr, TEMP tmp1, TEMP tmp2);
  size(24);
  format %{ "loop: \n\t"
            "LDREXD   $tmp1, $mem\n\t"
            "ADDS     $tmp1.lo, $tmp1.lo, $add\n\t"
            "ADC      $tmp1.hi, $tmp1.hi, 0\n\t"
            "STREXD   $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrexd($tmp1$$Register, $mem$$Address);
    __ adds($tmp1$$Register, $tmp1$$Register, $add$$constant);
    __ adc($tmp1$$Register->successor(), $tmp1$$Register->successor(), 0);
    __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}

// Atomic long get-and-add of a register value: res receives the 64-bit value
// loaded before the add; the sum is stored back via STREXD.
instruct xaddL_reg(memoryex mem, iRegL add, iRegLd res, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
  match(Set res (GetAndAddL mem add));
  effect( KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
  size(24);
  format %{ "loop: \n\t"
            "LDREXD   $res, $mem\n\t"
            "ADDS     $tmp1.lo, $res.lo, $add.lo\n\t"
            "ADC      $tmp1.hi, $res.hi, $add.hi\n\t"
            "STREXD   $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrexd($res$$Register, $mem$$Address);
    __ adds($tmp1$$Register, $res$$Register, $add$$Register);
    __ adc($tmp1$$Register->successor(), $res$$Register->successor(), $add$$Register->successor());
    __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
5756 
5757 // TODO: try immLRot2 instead, (0, $con$$constant) becomes
5758 // (hi($con$$constant), lo($con$$constant)) becomes
5759 instruct xaddL_immRot(memoryex mem, immLlowRot add, iRegLd res, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
5760   match(Set res (GetAndAddL mem add));
5761   effect( KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
5762   size(24);
5763   format %{ "loop: \n\t"
5764             "LDREXD   $res, $mem\n\t"
5765             "ADDS     $tmp1.lo, $res.lo, $add\n\t"
5766             "ADC      $tmp1.hi, $res.hi, 0\n\t"
5767             "STREXD   $tmp2, $tmp1, $mem\n\t"
5768             "CMP      $tmp2, 0 \n\t"
5769             "B.ne     loop \n\t" %}
5770 
5771   ins_encode %{
5772     Label loop;
5773     __ bind(loop);
5774     __ ldrexd($res$$Register, $mem$$Address);
5775     __ adds($tmp1$$Register, $res$$Register, $add$$constant);
5776     __ adc($tmp1$$Register->successor(), $res$$Register->successor(), 0);
5777     __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
5778     __ cmp($tmp2$$Register, 0);
5779     __ b(loop, ne);
5780   %}
5781   ins_pipe( long_memory_op );
5782 %}
5783 
// Atomic 32-bit exchange: LDREX old value into res, STREX newval; retry until
// the store-exclusive succeeds (tmp == 0). 4 instructions = 16 bytes.
// NOTE(review): ins_pipe reuses long_memory_op for the 32-bit exchanges too,
// matching the other exclusive-access instructs in this file.
instruct xchgI(memoryex mem, iRegI newval, iRegI res, iRegI tmp, flagsReg ccr) %{
  match(Set res (GetAndSetI mem newval));
  effect(KILL ccr, TEMP tmp, TEMP res);
  size(16);
  format %{ "loop: \n\t"
            "LDREX    $res, $mem\n\t"
            "STREX    $tmp, $newval, $mem\n\t"
            "CMP      $tmp, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($res$$Register,$mem$$Address);
    __ strex($tmp$$Register, $newval$$Register, $mem$$Address);
    __ cmp($tmp$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}

// Atomic 64-bit exchange using the doubleword exclusive pair LDREXD/STREXD.
// res and newval are even/odd register pairs (iRegLd).
instruct xchgL(memoryex mem, iRegLd newval, iRegLd res, iRegI tmp, flagsReg ccr) %{
  match(Set res (GetAndSetL mem newval));
  effect( KILL ccr, TEMP tmp, TEMP res);
  size(16);
  format %{ "loop: \n\t"
            "LDREXD   $res, $mem\n\t"
            "STREXD   $tmp, $newval, $mem\n\t"
            "CMP      $tmp, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrexd($res$$Register, $mem$$Address);
    __ strexd($tmp$$Register, $newval$$Register, $mem$$Address);
    __ cmp($tmp$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}

// Atomic pointer exchange — identical code shape to xchgI (pointers are
// 32-bit here), only the matched ideal opcode differs (GetAndSetP).
instruct xchgP(memoryex mem, iRegP newval, iRegP res, iRegI tmp, flagsReg ccr) %{
  match(Set res (GetAndSetP mem newval));
  effect(KILL ccr, TEMP tmp, TEMP res);
  size(16);
  format %{ "loop: \n\t"
            "LDREX    $res, $mem\n\t"
            "STREX    $tmp, $newval, $mem\n\t"
            "CMP      $tmp, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($res$$Register,$mem$$Address);
    __ strex($tmp$$Register, $newval$$Register, $mem$$Address);
    __ cmp($tmp$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
5846 
5847 //---------------------
5848 // Subtraction Instructions
5849 // Register Subtraction
5850 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
5851   match(Set dst (SubI src1 src2));
5852 
5853   size(4);
5854   format %{ "sub_32 $dst,$src1,$src2\t! int" %}
5855   ins_encode %{
5856     __ sub_32($dst$$Register, $src1$$Register, $src2$$Register);
5857   %}
5858   ins_pipe(ialu_reg_reg);
5859 %}
5860 
5861 instruct subshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
5862   match(Set dst (SubI src1 (LShiftI src2 src3)));
5863 
5864   size(4);
5865   format %{ "SUB    $dst,$src1,$src2<<$src3" %}
5866   ins_encode %{
5867     __ sub($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
5868   %}
5869   ins_pipe(ialu_reg_reg);
5870 %}
5871 
5872 instruct subshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
5873   match(Set dst (SubI src1 (LShiftI src2 src3)));
5874 
5875   size(4);
5876   format %{ "sub_32 $dst,$src1,$src2<<$src3\t! int" %}
5877   ins_encode %{
5878     __ sub_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
5879   %}
5880   ins_pipe(ialu_reg_reg);
5881 %}
5882 
5883 instruct subsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
5884   match(Set dst (SubI src1 (RShiftI src2 src3)));
5885 
5886   size(4);
5887   format %{ "SUB    $dst,$src1,$src2>>$src3" %}
5888   ins_encode %{
5889     __ sub($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
5890   %}
5891   ins_pipe(ialu_reg_reg);
5892 %}
5893 
5894 instruct subsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
5895   match(Set dst (SubI src1 (RShiftI src2 src3)));
5896 
5897   size(4);
5898   format %{ "sub_32 $dst,$src1,$src2>>$src3\t! int" %}
5899   ins_encode %{
5900     __ sub_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
5901   %}
5902   ins_pipe(ialu_reg_reg);
5903 %}
5904 
5905 instruct subshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
5906   match(Set dst (SubI src1 (URShiftI src2 src3)));
5907 
5908   size(4);
5909   format %{ "SUB    $dst,$src1,$src2>>>$src3" %}
5910   ins_encode %{
5911     __ sub($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
5912   %}
5913   ins_pipe(ialu_reg_reg);
5914 %}
5915 
5916 instruct subshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
5917   match(Set dst (SubI src1 (URShiftI src2 src3)));
5918 
5919   size(4);
5920   format %{ "sub_32 $dst,$src1,$src2>>>$src3\t! int" %}
5921   ins_encode %{
5922     __ sub_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
5923   %}
5924   ins_pipe(ialu_reg_reg);
5925 %}
5926 
// When the shifted value is the minuend, (src1 << src2) - src3, ARM's RSB
// (reverse subtract) lets the shifted operand stay in the flexible second
// operand position: RSB dst, src3, (src1 <shift> src2) == (src1 <shift> src2) - src3.
// Register-specified shift amount, logical shift left.
instruct rsbshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (SubI (LShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1<<$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Immediate shift amount, logical shift left.
instruct rsbshlI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (SubI (LShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1<<$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-specified shift amount, arithmetic shift right.
instruct rsbsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (SubI (RShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1>>$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Immediate shift amount, arithmetic shift right.
instruct rsbsarI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (SubI (RShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1>>$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-specified shift amount, logical shift right.
instruct rsbshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (SubI (URShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1>>>$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Immediate shift amount, logical shift right.
instruct rsbshrI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (SubI (URShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1>>>$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
5992 
// Immediate Subtraction
// dst = src1 - imm, for immediates encodable as an ARM rotated 8-bit value.
instruct subI_reg_aimmI(iRegI dst, iRegI src1, aimmI src2) %{
  match(Set dst (SubI src1 src2));

  size(4);
  format %{ "sub_32 $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ sub_32($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}

// Matches AddI with a negative immediate whose negation IS rotate-encodable
// (aimmIneg): implemented as a subtract of the negated constant. Note the
// match rule is AddI, not SubI — this covers constants that ADD can't encode.
instruct subI_reg_immRotneg(iRegI dst, iRegI src1, aimmIneg src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "sub_32 $dst,$src1,-($src2)\t! int" %}
  ins_encode %{
    // src2 is negative; subtracting its negation realizes the addition.
    __ sub_32($dst$$Register, $src1$$Register, -$src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
6015 
6016 instruct subI_immRot_reg(iRegI dst, immIRot src1, iRegI src2) %{
6017   match(Set dst (SubI src1 src2));
6018 
6019   size(4);
6020   format %{ "RSB    $dst,$src2,src1" %}
6021   ins_encode %{
6022     __ rsb($dst$$Register, $src2$$Register, $src1$$constant);
6023   %}
6024   ins_pipe(ialu_zero_reg);
6025 %}
6026 
// Register Subtraction
// 64-bit subtract as a lo/hi pair: SUBS sets the borrow (carry) flag, SBC
// consumes it for the high words. Flags are clobbered, hence KILL icc.
instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2, flagsReg icc ) %{
  match(Set dst (SubL src1 src2));
  effect (KILL icc);

  size(8);
  format %{ "SUBS   $dst.lo,$src1.lo,$src2.lo\t! long\n\t"
            "SBC    $dst.hi,$src1.hi,$src2.hi" %}
  ins_encode %{
    __ subs($dst$$Register, $src1$$Register, $src2$$Register);
    __ sbc($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
  %}
  ins_pipe(ialu_reg_reg);
%}

// TODO

// Immediate Subtraction
// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Long minus a rotate-encodable immediate: only the low word takes the
// constant; the high word just absorbs the borrow via SBC 0.
// NOTE(review): the format prints "SUB" but the encoding emits the
// flag-setting SUBS — display-only inconsistency, confirm before changing.
instruct subL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con, flagsReg icc) %{
  match(Set dst (SubL src1 con));
  effect (KILL icc);

  size(8);
  format %{ "SUB    $dst.lo,$src1.lo,$con\t! long\n\t"
            "SBC    $dst.hi,$src1.hi,0" %}
  ins_encode %{
    __ subs($dst$$Register, $src1$$Register, $con$$constant);
    __ sbc($dst$$Register->successor(), $src1$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_imm);
%}

// Long negation
// 0 - src2 over a register pair: RSBS produces the low word and the borrow,
// RSC (reverse subtract with carry) completes the high word.
instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2, flagsReg icc) %{
  match(Set dst (SubL zero src2));
  effect (KILL icc);

  size(8);
  format %{ "RSBS   $dst.lo,$src2.lo,0\t! long\n\t"
            "RSC    $dst.hi,$src2.hi,0" %}
  ins_encode %{
    __ rsbs($dst$$Register, $src2$$Register, 0);
    __ rsc($dst$$Register->successor(), $src2$$Register->successor(), 0);
  %}
  ins_pipe(ialu_zero_reg);
%}
6075 
// Multiplication Instructions
// Integer Multiplication
// Register Multiplication
// 32-bit multiply (low 32 bits of the product, which is all MulI needs).
instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (MulI src1 src2));

  size(4);
  format %{ "mul_32 $dst,$src1,$src2" %}
  ins_encode %{
    __ mul_32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(imul_reg_reg);
%}

// The next three instructs are expand-only pieces of the 64x64->64 multiply
// (see mulL_reg_reg below): since only the low 64 bits are kept,
// result.hi = lo1*hi2 + hi1*lo2 + high(lo1*lo2), result.lo = low(lo1*lo2).
// Piece 1: dst.hi = src1.lo * src2.hi.
instruct mulL_lo1_hi2(iRegL dst, iRegL src1, iRegL src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "MUL  $dst.hi,$src1.lo,$src2.hi\t! long" %}
  ins_encode %{
    __ mul($dst$$Register->successor(), $src1$$Register, $src2$$Register->successor());
  %}
  ins_pipe(imul_reg_reg);
%}

// Piece 2: dst.hi += src1.hi * src2.lo, and zero dst.lo so the final UMLAL
// accumulates into a clean low word.
instruct mulL_hi1_lo2(iRegL dst, iRegL src1, iRegL src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(8);
  format %{ "MLA  $dst.hi,$src1.hi,$src2.lo,$dst.hi\t! long\n\t"
            "MOV  $dst.lo, 0"%}
  ins_encode %{
    __ mla($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register, $dst$$Register->successor());
    __ mov($dst$$Register, 0);
  %}
  ins_pipe(imul_reg_reg);
%}

// Piece 3: unsigned multiply-accumulate-long adds the full 64-bit product
// src1.lo * src2.lo into the dst pair.
instruct mulL_lo1_lo2(iRegL dst, iRegL src1, iRegL src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{ "UMLAL  $dst.lo,$dst.hi,$src1,$src2\t! long" %}
  ins_encode %{
    __ umlal($dst$$Register, $dst$$Register->successor(), $src1$$Register, $src2$$Register);
  %}
  ins_pipe(imul_reg_reg);
%}

// Full long multiply: expands into the three pieces above, in this order.
instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  expand %{
    mulL_lo1_hi2(dst, src1, src2);
    mulL_hi1_lo2(dst, src1, src2);
    mulL_lo1_lo2(dst, src1, src2);
  %}
%}
6131 
// Integer Division
// Register Division
// Integer divide via the shared idiv/irem stub. The fixed-register operand
// classes (R0/R1/R2, LR) encode the stub's calling convention; the stub
// clobbers flags and its argument registers, hence the KILLs.
instruct divI_reg_reg(R1RegI dst, R0RegI src1, R2RegI src2, LRRegP lr, flagsReg ccr) %{
  match(Set dst (DivI src1 src2));
  effect( KILL ccr, KILL src1, KILL src2, KILL lr);
  ins_cost((2+71)*DEFAULT_COST);

  format %{ "DIV   $dst,$src1,$src2 ! call to StubRoutines::Arm::idiv_irem_entry()" %}
  ins_encode %{
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
  %}
  ins_pipe(sdiv_reg_reg);
%}

// Register Long Division
// Long divide via a runtime call to SharedRuntime::ldiv; register-pair
// classes fix the C calling convention (args in R2:R3/R0:R1, result R0:R1).
instruct divL_reg_reg(R0R1RegL dst, R2R3RegL src1, R0R1RegL src2) %{
  match(Set dst (DivL src1 src2));
  effect(CALL);
  ins_cost(DEFAULT_COST*71);
  format %{ "DIVL  $src1,$src2,$dst\t! long ! call to SharedRuntime::ldiv" %}
  ins_encode %{
    address target = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
    __ call(target, relocInfo::runtime_call_type);
  %}
  ins_pipe(divL_reg_reg);
%}

// Integer Remainder
// Register Remainder
// Same idiv_irem stub as divI_reg_reg, but the remainder (R0) is the result.
instruct modI_reg_reg(R0RegI dst, R0RegI src1, R2RegI src2, R1RegI temp, LRRegP lr, flagsReg ccr ) %{
  match(Set dst (ModI src1 src2));
  effect( KILL ccr, KILL temp, KILL src2, KILL lr);

  format %{ "MODI   $dst,$src1,$src2\t ! call to StubRoutines::Arm::idiv_irem_entry" %}
  ins_encode %{
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
  %}
  ins_pipe(sdiv_reg_reg);
%}

// Register Long Remainder
// Long remainder via SharedRuntime::lrem, same conventions as divL_reg_reg.
instruct modL_reg_reg(R0R1RegL dst, R2R3RegL src1, R0R1RegL src2) %{
  match(Set dst (ModL src1 src2));
  effect(CALL);
  ins_cost(MEMORY_REF_COST); // FIXME
  format %{ "modL    $dst,$src1,$src2\t ! call to SharedRuntime::lrem" %}
  ins_encode %{
    address target = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
    __ call(target, relocInfo::runtime_call_type);
  %}
  ins_pipe(divL_reg_reg);
%}
6184 
// Integer Shift Instructions

// Register Shift Left
// 32-bit shift left by a register amount (MOV with LSL-shifted operand).
instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (LShiftI src1 src2));

  size(4);
  format %{ "LSL  $dst,$src1,$src2 \n\t" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register Shift Left Immediate
instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
  match(Set dst (LShiftI src1 src2));

  size(4);
  format %{ "LSL    $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ logical_shift_left($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}

// Expand-only pieces of the variable 64-bit shift left (see shlL_reg_reg):
// overlap computes the bits of src1.lo that cross into dst.hi (handling
// shift amounts both below and above 32), merge_hi ORs in src1.hi << src2,
// and merge_lo produces dst.lo = src1.lo << src2.
instruct shlL_reg_reg_merge_hi(iRegL dst, iRegL src1, iRegI src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{"OR  $dst.hi,$dst.hi,($src1.hi << $src2)"  %}
  ins_encode %{
    __ orr($dst$$Register->successor(), $dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsl, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct shlL_reg_reg_merge_lo(iRegL dst, iRegL src1, iRegI src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{ "LSL  $dst.lo,$src1.lo,$src2 \n\t" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Cross-word piece: for src2 >= 32 the pl path shifts src1.lo left by
// (src2-32) into dst.hi; for src2 < 32 the mi path shifts src1.lo right by
// (32-src2). Conditional execution (pl/mi) selects the case from SUBS' sign.
instruct shlL_reg_reg_overlap(iRegL dst, iRegL src1, iRegI src2, flagsReg ccr) %{
  effect(DEF dst, USE src1, USE src2, KILL ccr);
  size(16);
  format %{ "SUBS  $dst.hi,$src2,32 \n\t"
            "LSLpl $dst.hi,$src1.lo,$dst.hi \n\t"
            "RSBmi $dst.hi,$dst.hi,0 \n\t"
            "LSRmi $dst.hi,$src1.lo,$dst.hi" %}

  ins_encode %{
    // $src1$$Register and $dst$$Register->successor() can't be the same
    __ subs($dst$$Register->successor(), $src2$$Register, 32);
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register, lsl, $dst$$Register->successor()), pl);
    __ rsb($dst$$Register->successor(), $dst$$Register->successor(), 0, mi);
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register, lsr, $dst$$Register->successor()), mi);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Variable 64-bit shift left; the expand order matters: overlap defines
// dst.hi, then the merge pieces OR/set the remaining bits.
instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
  match(Set dst (LShiftL src1 src2));

  expand %{
    flagsReg ccr;
    shlL_reg_reg_overlap(dst, src1, src2, ccr);
    shlL_reg_reg_merge_hi(dst, src1, src2);
    shlL_reg_reg_merge_lo(dst, src1, src2);
  %}
%}

// Register Shift Left Immediate
// Constant shift in [32,63] (immU6Big): the low word vanishes, so dst.hi is
// src1.lo shifted by (src2-32) (plain MOV when src2 == 32) and dst.lo = 0.
instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6Big src2) %{
  match(Set dst (LShiftL src1 src2));

  size(8);
  format %{ "LSL   $dst.hi,$src1.lo,$src2-32\t! or mov if $src2==32\n\t"
            "MOV   $dst.lo, 0" %}
  ins_encode %{
    if ($src2$$constant == 32) {
      __ mov($dst$$Register->successor(), $src1$$Register);
    } else {
      __ mov($dst$$Register->successor(), AsmOperand($src1$$Register, lsl, $src2$$constant-32));
    }
    __ mov($dst$$Register, 0);
  %}
  ins_pipe(ialu_reg_imm);
%}

// Constant shift in [0,31]: dst.hi gets src1.hi << src2 merged with the top
// (32-src2) bits of src1.lo, then dst.lo = src1.lo << src2.
instruct shlL_reg_imm5(iRegL dst, iRegL src1, immU5 src2) %{
  match(Set dst (LShiftL src1 src2));

  size(12);
  format %{ "LSL   $dst.hi,$src1.lo,$src2\n\t"
            "OR    $dst.hi, $dst.hi, $src1.lo >> 32-$src2\n\t"
            "LSL   $dst.lo,$src1.lo,$src2" %}
  ins_encode %{
    // The order of the following 3 instructions matters: src1.lo and
    // dst.hi can't overlap but src.hi and dst.hi can.
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsl, $src2$$constant));
    __ orr($dst$$Register->successor(), $dst$$Register->successor(), AsmOperand($src1$$Register, lsr, 32-$src2$$constant));
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsl, $src2$$constant));
  %}
  ins_pipe(ialu_reg_imm);
%}
6294 
// Register Arithmetic Shift Right
// 32-bit arithmetic (sign-propagating) shift right by a register amount.
instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (RShiftI src1 src2));
  size(4);
  format %{ "ASR    $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, asr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register Arithmetic Shift Right Immediate
instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
  match(Set dst (RShiftI src1 src2));

  size(4);
  format %{ "ASR    $dst,$src1,$src2" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, asr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register Shift Right Arithmetic Long
// Expand-only pieces of the variable 64-bit arithmetic shift right (mirror
// image of the shlL pieces): overlap computes the src1.hi bits crossing into
// dst.lo, merge_lo ORs in src1.lo >>> src2, merge_hi sets dst.hi = src1.hi >> src2.
instruct sarL_reg_reg_merge_lo(iRegL dst, iRegL src1, iRegI src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{ "OR  $dst.lo,$dst.lo,($src1.lo >> $src2)"  %}
  ins_encode %{
    __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct sarL_reg_reg_merge_hi(iRegL dst, iRegL src1, iRegI src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{ "ASR  $dst.hi,$src1.hi,$src2 \n\t" %}
  ins_encode %{
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), asr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Cross-word piece: pl path (src2 >= 32) arithmetic-shifts src1.hi right by
// (src2-32) into dst.lo; mi path (src2 < 32) shifts src1.hi left by (32-src2).
instruct sarL_reg_reg_overlap(iRegL dst, iRegL src1, iRegI src2, flagsReg ccr) %{
  effect(DEF dst, USE src1, USE src2, KILL ccr);
  size(16);
  format %{ "SUBS  $dst.lo,$src2,32 \n\t"
            "ASRpl $dst.lo,$src1.hi,$dst.lo \n\t"
            "RSBmi $dst.lo,$dst.lo,0 \n\t"
            "LSLmi $dst.lo,$src1.hi,$dst.lo" %}

  ins_encode %{
    // $src1$$Register->successor() and $dst$$Register can't be the same
    __ subs($dst$$Register, $src2$$Register, 32);
    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), asr, $dst$$Register), pl);
    __ rsb($dst$$Register, $dst$$Register, 0, mi);
    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsl, $dst$$Register), mi);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Variable 64-bit arithmetic shift right, composed from the pieces above.
instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
  match(Set dst (RShiftL src1 src2));

  expand %{
    flagsReg ccr;
    sarL_reg_reg_overlap(dst, src1, src2, ccr);
    sarL_reg_reg_merge_lo(dst, src1, src2);
    sarL_reg_reg_merge_hi(dst, src1, src2);
  %}
%}

// Register Shift Left Immediate
// Constant shift in [32,63]: dst.lo = src1.hi >> (src2-32) (MOV at exactly
// 32); dst.hi must become all sign bits.
// NOTE(review): the final mov uses asr with amount 0 — in the ARM shifted-
// operand encoding an ASR immediate of 0 denotes ASR #32 (sign replication),
// which is what dst.hi needs here; confirm the assembler encodes it that way.
instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6Big src2) %{
  match(Set dst (RShiftL src1 src2));

  size(8);
  format %{ "ASR   $dst.lo,$src1.hi,$src2-32\t! or mov if $src2==32\n\t"
            "ASR   $dst.hi,$src1.hi, $src2" %}
  ins_encode %{
    if ($src2$$constant == 32) {
      __ mov($dst$$Register, $src1$$Register->successor());
    } else{
      __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), asr, $src2$$constant-32));
    }
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), asr, 0));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Constant shift in [0,31]: dst.lo merges src1.lo >>> src2 with the low bits
// of src1.hi shifted up; dst.hi = src1.hi >> src2 (arithmetic).
instruct sarL_reg_imm5(iRegL dst, iRegL src1, immU5 src2) %{
  match(Set dst (RShiftL src1 src2));
  size(12);
  format %{ "LSR   $dst.lo,$src1.lo,$src2\n\t"
            "OR    $dst.lo, $dst.lo, $src1.hi << 32-$src2\n\t"
            "ASR   $dst.hi,$src1.hi,$src2" %}
  ins_encode %{
    // The order of the following 3 instructions matters: src1.lo and
    // dst.hi can't overlap but src.hi and dst.hi can.
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
    __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register->successor(), lsl, 32-$src2$$constant));
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), asr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_imm);
%}
6402 
// Register Shift Right
// 32-bit logical (zero-filling) shift right by a register amount.
instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (URShiftI src1 src2));
  size(4);
  format %{ "LSR    $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register Shift Right Immediate
instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
  match(Set dst (URShiftI src1 src2));

  size(4);
  format %{ "LSR    $dst,$src1,$src2" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_imm);
%}

// Register Shift Right
// Expand-only pieces of the variable 64-bit logical shift right — same
// structure as the sarL pieces but with LSR (zero fill) in the high word.
instruct shrL_reg_reg_merge_lo(iRegL dst, iRegL src1, iRegI src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{ "OR   $dst.lo,$dst,($src1.lo >>> $src2)"  %}
  ins_encode %{
    __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct shrL_reg_reg_merge_hi(iRegL dst, iRegL src1, iRegI src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{ "LSR  $dst.hi,$src1.hi,$src2 \n\t" %}
  ins_encode %{
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Cross-word piece: pl path (src2 >= 32) logical-shifts src1.hi right by
// (src2-32) into dst.lo; mi path (src2 < 32) shifts src1.hi left by (32-src2).
instruct shrL_reg_reg_overlap(iRegL dst, iRegL src1, iRegI src2, flagsReg ccr) %{
  effect(DEF dst, USE src1, USE src2, KILL ccr);
  size(16);
  format %{ "SUBS  $dst,$src2,32 \n\t"
            "LSRpl $dst,$src1.hi,$dst \n\t"
            "RSBmi $dst,$dst,0 \n\t"
            "LSLmi $dst,$src1.hi,$dst" %}

  ins_encode %{
    // $src1$$Register->successor() and $dst$$Register can't be the same
    __ subs($dst$$Register, $src2$$Register, 32);
    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsr, $dst$$Register), pl);
    __ rsb($dst$$Register, $dst$$Register, 0, mi);
    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsl, $dst$$Register), mi);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Variable 64-bit logical shift right, composed from the pieces above.
instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
  match(Set dst (URShiftL src1 src2));

  expand %{
    flagsReg ccr;
    shrL_reg_reg_overlap(dst, src1, src2, ccr);
    shrL_reg_reg_merge_lo(dst, src1, src2);
    shrL_reg_reg_merge_hi(dst, src1, src2);
  %}
%}

// Register Shift Right Immediate
// Constant shift in [32,63]: dst.lo = src1.hi >>> (src2-32) (MOV at exactly
// 32), dst.hi = 0.
instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6Big src2) %{
  match(Set dst (URShiftL src1 src2));

  size(8);
  format %{ "LSR   $dst.lo,$src1.hi,$src2-32\t! or mov if $src2==32\n\t"
            "MOV   $dst.hi, 0" %}
  ins_encode %{
    if ($src2$$constant == 32) {
      __ mov($dst$$Register, $src1$$Register->successor());
    } else {
      __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsr, $src2$$constant-32));
    }
    __ mov($dst$$Register->successor(), 0);
  %}

  ins_pipe(ialu_reg_imm);
%}

// Constant shift in [0,31]: dst.lo merges src1.lo >>> src2 with the low bits
// of src1.hi shifted up; dst.hi = src1.hi >>> src2.
instruct shrL_reg_imm5(iRegL dst, iRegL src1, immU5 src2) %{
  match(Set dst (URShiftL src1 src2));

  size(12);
  format %{ "LSR   $dst.lo,$src1.lo,$src2\n\t"
            "OR    $dst.lo, $dst.lo, $src1.hi << 32-$src2\n\t"
            "LSR   $dst.hi,$src1.hi,$src2" %}
  ins_encode %{
    // The order of the following 3 instructions matters: src1.lo and
    // dst.hi can't overlap but src.hi and dst.hi can.
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
    __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register->successor(), lsl, 32-$src2$$constant));
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_imm);
%}
6511 
6512 
// Pointer cast to int followed by an unsigned shift (CastP2X + URShiftI),
// e.g. for hashing an address; plain LSR since pointers are 32-bit here.
instruct shrP_reg_imm5(iRegX dst, iRegP src1, immU5 src2) %{
  match(Set dst (URShiftI (CastP2X src1) src2));
  size(4);
  format %{ "LSR    $dst,$src1,$src2\t! Cast ptr $src1 to int and shift" %}
  ins_encode %{
    __ logical_shift_right($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
6522 
//----------Floating Point Arithmetic Instructions-----------------------------
// Each of the following maps one ideal FP node to a single VFP instruction
// via the corresponding MacroAssembler helper (add_float, add_double, ...).

//  Add float single precision
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));

  size(4);
  format %{ "FADDS  $dst,$src1,$src2" %}
  ins_encode %{
    __ add_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(faddF_reg_reg);
%}

//  Add float double precision
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));

  size(4);
  format %{ "FADDD  $dst,$src1,$src2" %}
  ins_encode %{
    __ add_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(faddD_reg_reg);
%}

//  Sub float single precision
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));

  size(4);
  format %{ "FSUBS  $dst,$src1,$src2" %}
  ins_encode %{
    __ sub_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(faddF_reg_reg);
%}

//  Sub float double precision
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));

  size(4);
  format %{ "FSUBD  $dst,$src1,$src2" %}
  ins_encode %{
    __ sub_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(faddD_reg_reg);
%}

//  Mul float single precision
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));

  size(4);
  format %{ "FMULS  $dst,$src1,$src2" %}
  ins_encode %{
    __ mul_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(fmulF_reg_reg);
%}

//  Mul float double precision
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));

  size(4);
  format %{ "FMULD  $dst,$src1,$src2" %}
  ins_encode %{
    __ mul_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(fmulD_reg_reg);
%}

//  Div float single precision
instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));

  size(4);
  format %{ "FDIVS  $dst,$src1,$src2" %}
  ins_encode %{
    __ div_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(fdivF_reg_reg);
%}

//  Div float double precision
instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  size(4);
  format %{ "FDIVD  $dst,$src1,$src2" %}
  ins_encode %{
    __ div_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(fdivD_reg_reg);
%}
6626 
//  Absolute float double precision
// Clears the sign bit of a double; a single 4-byte instruction.
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));

  size(4);
  format %{ "FABSd  $dst,$src" %}
  ins_encode %{
    __ abs_double($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(faddD_reg);
%}
6638 
6639 //  Absolute float single precision
// Clears the sign bit of a float; a single 4-byte instruction.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));

  // size(4) was missing here although every sibling single-instruction FP
  // unary instruct (absD, negF, sqrtF, ...) declares it; without it the
  // instruction size is not pinned to the 4 bytes actually emitted.
  size(4);
  format %{ "FABSs  $dst,$src" %}
  ins_encode %{
    __ abs_float($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(faddF_reg);
%}
6648 
// Negate float single precision (flips the sign bit).
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));

  size(4);
  format %{ "FNEGs  $dst,$src" %}
  ins_encode %{
    __ neg_float($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(faddF_reg);
%}
6659 
// Negate float double precision (flips the sign bit).
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));

  // size(4) was missing although neg_double emits a single 4-byte
  // instruction, matching every sibling FP unary instruct.
  size(4);
  format %{ "FNEGd  $dst,$src" %}
  ins_encode %{
    __ neg_double($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(faddD_reg);
%}
6669 
//  Sqrt float single precision
instruct sqrtF_reg_reg(regF dst, regF src) %{
  // The ideal graph computes float sqrt as float->double, SqrtD, double->
  // float; the whole pattern is collapsed into one single-precision sqrt.
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  size(4);
  format %{ "FSQRTS $dst,$src" %}
  ins_encode %{
    __ sqrt_float($dst$$FloatRegister, $src$$FloatRegister);
  %}
  // sqrt shares the divide pipeline class.
  ins_pipe(fdivF_reg_reg);
%}

//  Sqrt float double precision
instruct sqrtD_reg_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));

  size(4);
  format %{ "FSQRTD $dst,$src" %}
  ins_encode %{
    __ sqrt_double($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fdivD_reg_reg);
%}
6693 
//----------Logical Instructions-----------------------------------------------
// And Instructions
// Register And
instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (AndI src1 src2));

  size(4);
  // and_32 is usable in both ARM and Thumb2 encodings of the backend.
  format %{ "and_32 $dst,$src1,$src2" %}
  ins_encode %{
    __ and_32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
6707 
// AND with a shifted second operand, folding the shift into the AND via the
// ARM barrel shifter (lsl/asr/lsr selects <<, >> and >>> respectively).
// Register-shift variants take the shift amount from a register; the _imm
// variants use a 5-bit immediate (immU5).
instruct andshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "AND    $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct andshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "and_32 $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ and_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// AND with arithmetic-right-shifted operand (signed >>).
instruct andsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "AND    $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct andsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "and_32 $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ and_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// AND with logical-right-shifted operand (unsigned >>>).
instruct andshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "AND    $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct andshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "and_32 $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ and_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
6773 
// Immediate And
// limmI is a constant directly encodable as an AND immediate.
instruct andI_reg_limm(iRegI dst, iRegI src1, limmI src2) %{
  match(Set dst (AndI src1 src2));

  size(4);
  format %{ "and_32 $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ and_32($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}

// limmIn is a constant whose bitwise complement is encodable, so the AND is
// implemented as BIC (bit clear) with the complemented constant.
instruct andI_reg_limmn(iRegI dst, iRegI src1, limmIn src2) %{
  match(Set dst (AndI src1 src2));

  size(4);
  format %{ "bic    $dst,$src1,~$src2\t! int" %}
  ins_encode %{
    __ bic($dst$$Register, $src1$$Register, ~$src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
6796 
// Register And Long
// A 64-bit value lives in a pair of 32-bit registers; ->successor() is the
// register holding the high word, so two ANDs are emitted (size(8)).
instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (AndL src1 src2));

  ins_cost(DEFAULT_COST);
  size(8);
  format %{ "AND    $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
    __ andr($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
  %}
  ins_pipe(ialu_reg_reg);
%}

// TODO: try immLRot2 instead: the operand pair (0, $con$$constant) would
// become (hi($con$$constant), lo($con$$constant)).
instruct andL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con) %{
  match(Set dst (AndL src1 con));
  ins_cost(DEFAULT_COST);
  size(8);
  format %{ "AND    $dst,$src1,$con\t! long" %}
  ins_encode %{
    // immLlowRot only has low-word bits; the high word is ANDed with 0,
    // i.e. cleared.
    __ andr($dst$$Register, $src1$$Register, $con$$constant);
    __ andr($dst$$Register->successor(), $src1$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_imm);
%}
6824 
// Or Instructions
// Register Or
instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (OrI src1 src2));

  size(4);
  format %{ "orr_32 $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ orr_32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
6837 
// OR with a shifted second operand, folding the shift into the OR via the
// barrel shifter; same structure as the AND-with-shift instructs above.
instruct orshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "OR    $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct orshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "orr_32 $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ orr_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// OR with arithmetic-right-shifted operand (signed >>).
instruct orsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "OR    $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct orsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "orr_32 $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ orr_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// OR with logical-right-shifted operand (unsigned >>>).
instruct orshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "OR    $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct orshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "orr_32 $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ orr_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
6903 
// Immediate Or
// limmI is a constant directly encodable as an ORR immediate.
instruct orI_reg_limm(iRegI dst, iRegI src1, limmI src2) %{
  match(Set dst (OrI src1 src2));

  size(4);
  format %{ "orr_32  $dst,$src1,$src2" %}
  ins_encode %{
    __ orr_32($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
// TODO: orn_32 with limmIn
6916 
// Register Or Long
// Two 32-bit ORRs: low word first, then the high word via ->successor().
instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  ins_cost(DEFAULT_COST);
  size(8);
  format %{ "OR     $dst.lo,$src1.lo,$src2.lo\t! long\n\t"
            "OR     $dst.hi,$src1.hi,$src2.hi" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
    __ orr($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
  %}
  ins_pipe(ialu_reg_reg);
%}
6931 
// TODO: try immLRot2 instead: the operand pair (0, $con$$constant) would
// become (hi($con$$constant), lo($con$$constant)).
// OR a long register with a rotate-encodable constant that only has bits in
// the low word; the high word is ORed with 0 (left unchanged).
instruct orL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con) %{
  match(Set dst (OrL src1 con));
  ins_cost(DEFAULT_COST);
  size(8);
  // The format previously displayed "$con" for the high half as well,
  // which did not match the emitted code (the high half is ORed with 0).
  format %{ "OR     $dst.lo,$src1.lo,$con\t! long\n\t"
            "OR     $dst.hi,$src1.hi,0" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $con$$constant);
    __ orr($dst$$Register->successor(), $src1$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_imm);
%}
6946 
#ifdef TODO
// Disabled: would fold a pointer-to-int cast into the OR so Rthread (TLS
// register) can be used directly without spilling.
// Use SPRegP to match Rthread (TLS register) without spilling.
// Use store_ptr_RegP to match Rthread (TLS register) without spilling.
// Use sp_ptr_RegP to match Rthread (TLS register) without spilling.
instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));
  size(4);
  format %{ "OR     $dst,$src1,$src2" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif
6961 
// Xor Instructions
// Register Xor
instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (XorI src1 src2));

  size(4);
  format %{ "eor_32 $dst,$src1,$src2" %}
  ins_encode %{
    __ eor_32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
6974 
// XOR with a shifted second operand, folding the shift into the EOR via the
// barrel shifter; same structure as the AND/OR-with-shift instructs above.
instruct xorshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "XOR    $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct xorshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "eor_32 $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ eor_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// XOR with arithmetic-right-shifted operand (signed >>).
instruct xorsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "XOR    $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct xorsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "eor_32 $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ eor_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// XOR with logical-right-shifted operand (unsigned >>>).
instruct xorshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "XOR    $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct xorshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "eor_32 $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ eor_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
7040 
// Immediate Xor
// limmI is a constant directly encodable as an EOR immediate.
instruct xorI_reg_imm(iRegI dst, iRegI src1, limmI src2) %{
  match(Set dst (XorI src1 src2));

  size(4);
  format %{ "eor_32 $dst,$src1,$src2" %}
  ins_encode %{
    __ eor_32($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
7052 
// Register Xor Long
// Two 32-bit EORs: low word first, then the high word via ->successor().
instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(DEFAULT_COST);
  size(8);
  // Format lines reordered lo-then-hi to match the emission order below
  // and the sibling orL_reg_reg/andL_reg_reg listings.
  format %{ "XOR     $dst.lo,$src1.lo,$src2.lo\t! long\n\t"
            "XOR     $dst.hi,$src1.hi,$src2.hi\t! long" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, $src2$$Register);
    __ eor($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
  %}
  ins_pipe(ialu_reg_reg);
%}
7066 
// TODO: try immLRot2 instead: the operand pair (0, $con$$constant) would
// become (hi($con$$constant), lo($con$$constant)).
// XOR a long register with a rotate-encodable constant that only has bits in
// the low word; the high word is XORed with 0 (left unchanged).
instruct xorL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con) %{
  match(Set dst (XorL src1 con));
  ins_cost(DEFAULT_COST);
  size(8);
  // The format previously showed $con applied to the high half and 0 to the
  // low half — the reverse of what the encoding emits ($dst$$Register is the
  // low word, ->successor() the high word).
  format %{ "XOR     $dst.lo,$src1.lo,$con\t! long\n\t"
            "XOR     $dst.hi,$src1.hi,0\t! long" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, $con$$constant);
    __ eor($dst$$Register->successor(), $src1$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_imm);
%}
7081 
//----------Convert to Boolean-------------------------------------------------
// int -> 0/1: TST sets flags, dst is 0 then conditionally overwritten with 1.
instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
  match(Set dst (Conv2B src));
  effect(KILL ccr);  // the TST clobbers the condition flags
  size(12);
  ins_cost(DEFAULT_COST*2);
  format %{ "TST    $src,$src \n\t"
            "MOV    $dst, 0   \n\t"
            "MOV.ne $dst, 1" %}
  ins_encode %{ // FIXME: can do better?
    __ tst($src$$Register, $src$$Register);
    __ mov($dst$$Register, 0);
    __ mov($dst$$Register, 1, ne);  // conditional move, executed only if nonzero
  %}
  ins_pipe(ialu_reg_ialu);
%}

// pointer -> 0/1: same sequence as convI2B but matching a pointer operand.
instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
  match(Set dst (Conv2B src));
  effect(KILL ccr);
  size(12);
  ins_cost(DEFAULT_COST*2);
  format %{ "TST    $src,$src \n\t"
            "MOV    $dst, 0   \n\t"
            "MOV.ne $dst, 1" %}
  ins_encode %{
    __ tst($src$$Register, $src$$Register);
    __ mov($dst$$Register, 0);
    __ mov($dst$$Register, 1, ne);
  %}
  ins_pipe(ialu_reg_ialu);
%}
7114 
// CmpLTMask: dst = (p < q) ? -1 : 0.  mvn(dst, 0, lt) writes ~0 == -1 only
// when the preceding compare set lt.
instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{
  match(Set dst (CmpLTMask p q));
  effect( KILL ccr );
  ins_cost(DEFAULT_COST*3);
  format %{ "CMP    $p,$q\n\t"
            "MOV    $dst, #0\n\t"
            "MOV.lt $dst, #-1" %}
  ins_encode %{
    __ cmp($p$$Register, $q$$Register);
    __ mov($dst$$Register, 0);
    __ mvn($dst$$Register, 0, lt);  // ~0 == -1, only if p < q
  %}
  ins_pipe(ialu_reg_reg_ialu);
%}

// Same as above with an encodable immediate for q (aimmI).
instruct cmpLTMask_reg_imm( iRegI dst, iRegI p, aimmI q, flagsReg ccr ) %{
  match(Set dst (CmpLTMask p q));
  effect( KILL ccr );
  ins_cost(DEFAULT_COST*3);
  format %{ "CMP    $p,$q\n\t"
            "MOV    $dst, #0\n\t"
            "MOV.lt $dst, #-1" %}
  ins_encode %{
    __ cmp($p$$Register, $q$$constant);
    __ mov($dst$$Register, 0);
    __ mvn($dst$$Register, 0, lt);
  %}
  ins_pipe(ialu_reg_reg_ialu);
%}

// z += (p < q) ? y : 0, done as a compare plus a conditional add; the
// CmpLTMask/AndI combination never materializes the mask.
instruct cadd_cmpLTMask3( iRegI p, iRegI q, iRegI y, iRegI z, flagsReg ccr ) %{
  match(Set z (AddI (AndI (CmpLTMask p q) y) z));
  effect( KILL ccr );
  ins_cost(DEFAULT_COST*2);
  format %{ "CMP    $p,$q\n\t"
            "ADD.lt $z,$y,$z" %}
  ins_encode %{
    __ cmp($p$$Register, $q$$Register);
    __ add($z$$Register, $y$$Register, $z$$Register, lt);  // only if p < q
  %}
  ins_pipe( cadd_cmpltmask );
%}

// FIXME: remove unused "dst"
// Immediate-q variant of cadd_cmpLTMask3.
instruct cadd_cmpLTMask4( iRegI dst, iRegI p, aimmI q, iRegI y, iRegI z, flagsReg ccr ) %{
  match(Set z (AddI (AndI (CmpLTMask p q) y) z));
  effect( KILL ccr );
  ins_cost(DEFAULT_COST*2);
  format %{ "CMP    $p,$q\n\t"
            "ADD.lt $z,$y,$z" %}
  ins_encode %{
    __ cmp($p$$Register, $q$$constant);
    __ add($z$$Register, $y$$Register, $z$$Register, lt);
  %}
  ins_pipe( cadd_cmpltmask );
%}

// p = p - q, then p += y if the subtraction went negative; SUBS both
// computes the difference and sets the flags for the conditional add.
instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, flagsReg ccr ) %{
  match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
  effect( KILL ccr );
  ins_cost(DEFAULT_COST*2);
  format %{ "SUBS   $p,$p,$q\n\t"
            "ADD.lt $p,$y,$p" %}
  ins_encode %{
    __ subs($p$$Register, $p$$Register, $q$$Register);
    __ add($p$$Register, $y$$Register, $p$$Register, lt);
  %}
  ins_pipe( cadd_cmpltmask );
%}
7184 
//----------Arithmetic Conversion Instructions---------------------------------
// The conversions operations are all Alpha sorted.  Please keep it that way!

// Narrow double to float with a single convert instruction.
instruct convD2F_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  size(4);
  format %{ "FCVTSD  $dst,$src" %}
  ins_encode %{
    __ convert_d2f($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fcvtD2F);
%}
7197 
// Convert a double to an int in a float register.
// If the double is a NAN, stuff a zero in instead.

// ftosizd does the round-toward-zero conversion into a scratch FP register
// (tmp); fmrs then moves the 32-bit result into the integer destination.
instruct convD2I_reg_reg(iRegI dst, regD src, regF tmp) %{
  match(Set dst (ConvD2I src));
  effect( TEMP tmp );
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
  format %{ "FTOSIZD  $tmp,$src\n\t"
            "FMRS     $dst, $tmp" %}
  ins_encode %{
    __ ftosizd($tmp$$FloatRegister, $src$$FloatRegister);
    __ fmrs($dst$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(fcvtD2I);
%}
7213 
// Convert a double to a long in a double register.
// If the double is a NAN, stuff a zero in instead.

// Double to Long conversion
// Implemented as a runtime call to SharedRuntime::d2l; the argument is
// passed per the float ABI in use and the result lands in R0:R1 (dst is
// constrained to the R0R1 pair).
instruct convD2L_reg(R0R1RegL dst, regD src) %{
  match(Set dst (ConvD2L src));
  effect(CALL);
  ins_cost(MEMORY_REF_COST); // FIXME
  format %{ "convD2L    $dst,$src\t ! call to SharedRuntime::d2l" %}
  ins_encode %{
#ifndef __ABI_HARD__
    // soft-float ABI: pass the double in the integer register pair.
    __ fmrrd($dst$$Register, $dst$$Register->successor(), $src$$FloatRegister);
#else
    // hard-float ABI: argument goes in D0.
    if ($src$$FloatRegister != D0) {
      __ mov_double(D0, $src$$FloatRegister);
    }
#endif
    address target = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
    __ call(target, relocInfo::runtime_call_type);
  %}
  ins_pipe(fcvtD2L);
%}
7236 
// Widen float to double with a single convert instruction.
instruct convF2D_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  size(4);
  format %{ "FCVTDS  $dst,$src" %}
  ins_encode %{
    __ convert_f2d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fcvtF2D);
%}
7246 
// float -> int: round-toward-zero convert into a scratch FP register, then
// move the 32-bit result to the integer destination.
instruct convF2I_reg_reg(iRegI dst, regF src, regF tmp) %{
  match(Set dst (ConvF2I src));
  effect( TEMP tmp );
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
  size(8);
  format %{ "FTOSIZS  $tmp,$src\n\t"
            "FMRS     $dst, $tmp" %}
  ins_encode %{
    __ ftosizs($tmp$$FloatRegister, $src$$FloatRegister);
    __ fmrs($dst$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(fcvtF2I);
%}
7260 
// Float to Long conversion
// Runtime call to SharedRuntime::f2l; argument in R0 (soft-float) or S0
// (hard-float), result in the R0:R1 pair.
instruct convF2L_reg(R0R1RegL dst, regF src, R0RegI arg1) %{
  match(Set dst (ConvF2L src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
  effect(CALL);
  format %{ "convF2L  $dst,$src\t! call to SharedRuntime::f2l" %}
  ins_encode %{
#ifndef __ABI_HARD__
    // soft-float ABI: pass the float bits in R0 (arg1).
    __ fmrs($arg1$$Register, $src$$FloatRegister);
#else
    // hard-float ABI: argument goes in S0.
    if($src$$FloatRegister != S0) {
      __ mov_float(S0, $src$$FloatRegister);
    }
#endif
    address target = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
    __ call(target, relocInfo::runtime_call_type);
  %}
  ins_pipe(fcvtF2L);
%}
7280 
// int -> double: move the int into the low half of a double register, then
// convert in place with fsitod.
instruct convI2D_reg_reg(iRegI src, regD_low dst) %{
  match(Set dst (ConvI2D src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST); // FIXME
  size(8);
  format %{ "FMSR     $dst,$src \n\t"
            "FSITOD   $dst $dst"%}
  ins_encode %{
      __ fmsr($dst$$FloatRegister, $src$$Register);
      __ fsitod($dst$$FloatRegister, $dst$$FloatRegister);
  %}
  ins_pipe(fcvtI2D);
%}

// int -> float: same pattern with the single-precision convert.
instruct convI2F_reg_reg( regF dst, iRegI src ) %{
  match(Set dst (ConvI2F src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST); // FIXME
  size(8);
  format %{ "FMSR     $dst,$src \n\t"
            "FSITOS   $dst, $dst"%}
  ins_encode %{
      __ fmsr($dst$$FloatRegister, $src$$Register);
      __ fsitos($dst$$FloatRegister, $dst$$FloatRegister);
  %}
  ins_pipe(fcvtI2F);
%}
7306 
// Sign-extend int to long: copy into the low word, fill the high word with
// the sign (asr #31).
instruct convI2L_reg(iRegL dst, iRegI src) %{
  match(Set dst (ConvI2L src));
  size(8);
  format %{ "MOV    $dst.lo, $src \n\t"
            "ASR    $dst.hi,$src,31\t! int->long" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register);
    __ mov($dst$$Register->successor(), AsmOperand($src$$Register, asr, 31));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend convert int to long
// Matches (ConvI2L src) & 0xFFFFFFFF so no sign extension is needed.
instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
  match(Set dst (AndL (ConvI2L src) mask) );
  size(8);
  format %{ "MOV    $dst.lo,$src.lo\t! zero-extend int to long\n\t"
            "MOV    $dst.hi, 0"%}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register);
    __ mov($dst$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend long
// Masking a long with 0xFFFFFFFF just clears the high word.
instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
  match(Set dst (AndL src mask) );
  size(8);
  format %{ "MOV    $dst.lo,$src.lo\t! zero-extend long\n\t"
            "MOV    $dst.hi, 0"%}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register);
    __ mov($dst$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_reg);
%}
7344 
// Raw bit moves between the FP and integer register files (no conversion).

instruct MoveF2I_reg_reg(iRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST); // FIXME

  size(4);
  format %{ "FMRS   $dst,$src\t! MoveF2I" %}
  ins_encode %{
    __ fmrs($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe(iload_mem); // FIXME
%}

instruct MoveI2F_reg_reg(regF dst, iRegI src) %{
  match(Set dst (MoveI2F src));
  ins_cost(MEMORY_REF_COST); // FIXME

  size(4);
  format %{ "FMSR   $dst,$src\t! MoveI2F" %}
  ins_encode %{
    __ fmsr($dst$$FloatRegister, $src$$Register);
  %}
  ins_pipe(iload_mem); // FIXME
%}

// 64-bit moves use the register pair: dst and dst->successor().
instruct MoveD2L_reg_reg(iRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST); // FIXME

  size(4);
  format %{ "FMRRD    $dst,$src\t! MoveD2L" %}
  ins_encode %{
    __ fmrrd($dst$$Register, $dst$$Register->successor(), $src$$FloatRegister);
  %}
  ins_pipe(iload_mem); // FIXME
%}

instruct MoveL2D_reg_reg(regD dst, iRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST); // FIXME

  size(4);
  format %{ "FMDRR   $dst,$src\t! MoveL2D" %}
  ins_encode %{
    __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register->successor());
  %}
  ins_pipe(ialu_reg_reg); // FIXME
%}
7395 
7396 //-----------
7397 // Long to Double conversion
7398 
7399 // Magic constant, 0x43300000
7400 instruct loadConI_x43300000(iRegI dst) %{
7401   effect(DEF dst);
7402   size(8);
7403   format %{ "MOV_SLOW  $dst,0x43300000\t! 2^52" %}
7404   ins_encode %{
7405     __ mov_slow($dst$$Register, 0x43300000);
7406   %}
7407   ins_pipe(ialu_none);
7408 %}
7409 
7410 // Magic constant, 0x41f00000
7411 instruct loadConI_x41f00000(iRegI dst) %{
7412   effect(DEF dst);
7413   size(8);
7414   format %{ "MOV_SLOW  $dst, 0x41f00000\t! 2^32" %}
7415   ins_encode %{
7416     __ mov_slow($dst$$Register, 0x41f00000);
7417   %}
7418   ins_pipe(ialu_none);
7419 %}
7420 
7421 instruct loadConI_x0(iRegI dst) %{
7422   effect(DEF dst);
7423   size(4);
7424   format %{ "MOV  $dst, 0x0\t! 0" %}
7425   ins_encode %{
7426     __ mov($dst$$Register, 0);
7427   %}
7428   ins_pipe(ialu_none);
7429 %}
7430 
// Construct a double from two float halves
// Copies the hi single of src1 and the lo single of src2 into the halves of
// dst; ->successor() is the upper single register of a low double.
instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(8);
  format %{ "FCPYS  $dst.hi,$src1.hi\n\t"
            "FCPYS  $dst.lo,$src2.lo" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister->successor(), $src1$$FloatRegister->successor());
    __ fcpys($dst$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(faddD_reg_reg);
%}

// Convert integer in high half of a double register (in the lower half of
// the double register file) to double
instruct convI2D_regDHi_regD(regD dst, regD_low src) %{
  effect(DEF dst, USE src);
  size(4);
  format %{ "FSITOD  $dst,$src" %}
  ins_encode %{
    __ fsitod($dst$$FloatRegister, $src$$FloatRegister->successor());
  %}
  ins_pipe(fcvtLHi2D);
%}
7455 
// Add float double precision
// (no match rule: helper used only inside the convL2D expand)
instruct addD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FADDD  $dst,$src1,$src2" %}
  ins_encode %{
    __ add_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(faddD_reg_reg);
%}

// Sub float double precision
instruct subD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FSUBD  $dst,$src1,$src2" %}
  ins_encode %{
    __ sub_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(faddD_reg_reg);
%}

// Mul float double precision
instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FMULD  $dst,$src1,$src2" %}
  ins_encode %{
    __ mul_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fmulD_reg_reg);
%}

// Move a long register pair into a double register as raw bits.
instruct regL_to_regD(regD dst, iRegL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "FMDRR   $dst,$src\t! regL to regD" %}
  ins_encode %{
    __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register->successor());
  %}
  ins_pipe(ialu_reg_reg); // FIXME
%}

// Move two int registers into the lo/hi halves of a double register.
instruct regI_regI_to_regD(regD dst, iRegI src1, iRegI src2) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src1, USE src2);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "FMDRR   $dst,$src1,$src2\t! regI,regI to regD" %}
  ins_encode %{
    __ fmdrr($dst$$FloatRegister, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg); // FIXME
%}
7512 
// long -> double without a direct hardware conversion, using the classic
// 2^52 bit trick: the low 32 bits are made exact by gluing them under the
// exponent of 2^52 and subtracting 2^52; the high 32 bits are converted as
// a signed int and scaled by 2^32; the two partial results are summed.
instruct convL2D_reg_slow_fxtof(regD dst, iRegL src) %{
  match(Set dst (ConvL2D src));
  ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6); // FIXME

  expand %{
    regD_low   tmpsrc;
    iRegI      ix43300000;
    iRegI      ix41f00000;
    iRegI      ix0;
    regD_low   dx43300000;
    regD       dx41f00000;
    regD       tmp1;
    regD_low   tmp2;
    regD       tmp3;
    regD       tmp4;

    // Raw-move the long into an FP register pair.
    regL_to_regD(tmpsrc, src);

    // Materialize the high words of double 2^52 and 2^32, and a zero.
    loadConI_x43300000(ix43300000);
    loadConI_x41f00000(ix41f00000);
    loadConI_x0(ix0);

    // Assemble the doubles 2^52 and 2^32 from (lo, hi) word pairs.
    regI_regI_to_regD(dx43300000, ix0, ix43300000);
    regI_regI_to_regD(dx41f00000, ix0, ix41f00000);

    convI2D_regDHi_regD(tmp1, tmpsrc);                // hi word as signed double
    regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc);  // 2^52 | unsigned lo word
    subD_regD_regD(tmp3, tmp2, dx43300000);           // exact double of lo word
    mulD_regD_regD(tmp4, tmp1, dx41f00000);           // hi word * 2^32
    addD_regD_regD(dst, tmp3, tmp4);
  %}
%}
7545 
// long -> int: just take the low word of the pair.
instruct convL2I_reg(iRegI dst, iRegL src) %{
  match(Set dst (ConvL2I src));
  size(4);
  format %{ "MOV    $dst,$src.lo\t! long->int" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_move_reg_I_to_L);
%}

// Register Shift Right Immediate
// (long >> cnt) narrowed to int with cnt in [32,63]: only the high word
// matters, shifted right by cnt-32 (a plain move when cnt == 32, since a
// shift amount of 0 cannot be encoded this way).
instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{
  match(Set dst (ConvL2I (RShiftL src cnt)));
  size(4);
  format %{ "ASR    $dst,$src.hi,($cnt - 32)\t! long->int or mov if $cnt==32" %}
  ins_encode %{
    if ($cnt$$constant == 32) {
      __ mov($dst$$Register, $src$$Register->successor());
    } else {
      __ mov($dst$$Register, AsmOperand($src$$Register->successor(), asr, $cnt$$constant - 32));
    }
  %}
  ins_pipe(ialu_reg_imm);
%}
7570 
7571 
7572 //----------Control Flow Instructions------------------------------------------
7573 // Compare Instructions
7574 // Compare Integers
// Signed int compare, register-register; sets all integer condition flags.
instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1, USE op2 );

  size(4);
  format %{ "cmp_32 $op1,$op2\t! int" %}
  ins_encode %{
    __ cmp_32($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
7586 
7587 #ifdef _LP64
7588 // Compare compressed pointers
// Compare compressed pointers (CmpN) as unsigned 32-bit values.
// NOTE(review): this sits under an _LP64 guard in a 32-bit ARM port and
// so looks like dead code here — confirm before relying on it.
instruct compN_reg2(flagsRegU icc, iRegN op1, iRegN op2) %{
  match(Set icc (CmpN op1 op2));
  effect( DEF icc, USE op1, USE op2 );

  size(4);
  format %{ "cmp_32 $op1,$op2\t! int" %}
  ins_encode %{
    __ cmp_32($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
7600 #endif
7601 
// Unsigned int compare, register-register (consumers read unsigned flags).
instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{
  match(Set icc (CmpU op1 op2));

  size(4);
  format %{ "cmp_32 $op1,$op2\t! unsigned int" %}
  ins_encode %{
    __ cmp_32($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
7612 
// Signed int compare against a negative immediate whose negation fits an
// ARM rotated immediate: CMP op1, op2  ==  CMN op1, -op2.
instruct compI_iReg_immneg(flagsReg icc, iRegI op1, aimmIneg op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1 );

  size(4);
  format %{ "cmn_32 $op1,-$op2\t! int" %}
  ins_encode %{
    __ cmn_32($op1$$Register, -$op2$$constant);
  %}
  ins_pipe(ialu_cconly_reg_imm);
%}
7624 
// Signed int compare against an encodable (rotated) immediate.
instruct compI_iReg_imm(flagsReg icc, iRegI op1, aimmI op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1 );

  size(4);
  format %{ "cmp_32 $op1,$op2\t! int" %}
  ins_encode %{
    __ cmp_32($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_cconly_reg_imm);
%}
7636 
// Flag-setting AND test: (op1 & op2) compared with zero; only EQ/NE/LT/GE
// consumers may use the result (flagsReg_EQNELTGE).
// NOTE(review): format prints the operands as $op2,$op1 while the encoding
// is tst_32(op1, op2); TST is symmetric so only the display differs.
instruct testI_reg_reg( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 op2) zero));
  size(4);
  format %{ "tst_32 $op2,$op1" %}

  ins_encode %{
    __ tst_32($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg_zero);
%}
7647 
// Flag-setting test of op1 & (op2 << op3), register shift count.
// Only EQ/NE/LT/GE consumers may use the result (flagsReg_EQNELTGE).
instruct testshlI_reg_reg_reg( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, iRegI op3, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 (LShiftI op2 op3)) zero));
  size(4);
  // Format fixed to match the encoding: op2 is the shifted operand.
  format %{ "TST   $op1,$op2<<$op3" %}

  ins_encode %{
    __ tst($op1$$Register, AsmOperand($op2$$Register, lsl, $op3$$Register));
  %}
  ins_pipe(ialu_cconly_reg_reg_zero);
%}
7658 
// Flag-setting test of op1 & (op2 << op3), immediate shift count (0..31).
// Only EQ/NE/LT/GE consumers may use the result (flagsReg_EQNELTGE).
instruct testshlI_reg_reg_imm( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, immU5 op3, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 (LShiftI op2 op3)) zero));
  size(4);
  // Format fixed to match the encoding: op2 is the shifted operand.
  format %{ "tst_32 $op1,$op2<<$op3" %}

  ins_encode %{
    __ tst_32($op1$$Register, AsmOperand($op2$$Register, lsl, $op3$$constant));
  %}
  ins_pipe(ialu_cconly_reg_reg_zero);
%}
7669 
// Flag-setting test of op1 & (op2 >> op3) with arithmetic right shift,
// register shift count. Only EQ/NE/LT/GE consumers may use the result.
instruct testsarI_reg_reg_reg( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, iRegI op3, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 (RShiftI op2 op3)) zero));
  size(4);
  // Format fixed: the shift is an arithmetic right shift (asr), not '<<',
  // and op2 is the shifted operand.
  format %{ "TST   $op1,$op2>>$op3" %}

  ins_encode %{
    __ tst($op1$$Register, AsmOperand($op2$$Register, asr, $op3$$Register));
  %}
  ins_pipe(ialu_cconly_reg_reg_zero);
%}
7680 
// Flag-setting test of op1 & (op2 >> op3) with arithmetic right shift,
// immediate shift count (0..31). Only EQ/NE/LT/GE consumers may use it.
instruct testsarI_reg_reg_imm( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, immU5 op3, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 (RShiftI op2 op3)) zero));
  size(4);
  // Format fixed: the shift is an arithmetic right shift (asr), not '<<',
  // and op2 is the shifted operand.
  format %{ "tst_32 $op1,$op2>>$op3" %}

  ins_encode %{
    __ tst_32($op1$$Register, AsmOperand($op2$$Register, asr, $op3$$constant));
  %}
  ins_pipe(ialu_cconly_reg_reg_zero);
%}
7691 
// Flag-setting test of op1 & (op2 >>> op3) with logical right shift,
// register shift count. Only EQ/NE/LT/GE consumers may use the result.
instruct testshrI_reg_reg_reg( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, iRegI op3, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 (URShiftI op2 op3)) zero));
  size(4);
  // Format fixed: the shift is a logical right shift (lsr), not '<<',
  // and op2 is the shifted operand.
  format %{ "TST   $op1,$op2>>>$op3" %}

  ins_encode %{
    __ tst($op1$$Register, AsmOperand($op2$$Register, lsr, $op3$$Register));
  %}
  ins_pipe(ialu_cconly_reg_reg_zero);
%}
7702 
// Flag-setting test of op1 & (op2 >>> op3) with logical right shift,
// immediate shift count (0..31). Only EQ/NE/LT/GE consumers may use it.
instruct testshrI_reg_reg_imm( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, immU5 op3, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 (URShiftI op2 op3)) zero));
  size(4);
  // Format fixed: the shift is a logical right shift (lsr), not '<<',
  // and op2 is the shifted operand.
  format %{ "tst_32 $op1,$op2>>>$op3" %}

  ins_encode %{
    __ tst_32($op1$$Register, AsmOperand($op2$$Register, lsr, $op3$$constant));
  %}
  ins_pipe(ialu_cconly_reg_reg_zero);
%}
7713 
// Flag-setting AND test against an encodable immediate mask.
// NOTE(review): format prints $op2,$op1 while the encoding is
// tst_32(op1, op2); TST is symmetric so only the display differs.
instruct testI_reg_imm( flagsReg_EQNELTGE icc, iRegI op1, limmI op2, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 op2) zero));
  size(4);
  format %{ "tst_32 $op2,$op1" %}

  ins_encode %{
    __ tst_32($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_cconly_reg_imm_zero);
%}
7724 
// Signed long compare (LT/GE flavor): 64-bit subtract via SUBS/SBCS into a
// scratch pair; only the resulting N/V flags are meaningful to consumers.
instruct compL_reg_reg_LTGE(flagsRegL_LTGE xcc, iRegL op1, iRegL op2, iRegL tmp) %{
  match(Set xcc (CmpL op1 op2));
  effect( DEF xcc, USE op1, USE op2, TEMP tmp );

  size(8);
  format %{ "SUBS    $tmp,$op1.low,$op2.low\t\t! long\n\t"
            "SBCS    $tmp,$op1.hi,$op2.hi" %}
  ins_encode %{
    __ subs($tmp$$Register, $op1$$Register, $op2$$Register);
    __ sbcs($tmp$$Register->successor(), $op1$$Register->successor(), $op2$$Register->successor());
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
7738 
// Unsigned long compare (LT/GE flavor): same SUBS/SBCS sequence as the
// signed version; consumers interpret the flags as unsigned conditions.
instruct compUL_reg_reg_LTGE(flagsRegUL_LTGE xcc, iRegL op1, iRegL op2, iRegL tmp) %{
  match(Set xcc (CmpUL op1 op2));
  effect(DEF xcc, USE op1, USE op2, TEMP tmp);

  size(8);
  format %{ "SUBS    $tmp,$op1.low,$op2.low\t\t! unsigned long\n\t"
            "SBCS    $tmp,$op1.hi,$op2.hi" %}
  ins_encode %{
    __ subs($tmp$$Register, $op1$$Register, $op2$$Register);
    __ sbcs($tmp$$Register->successor(), $op1$$Register->successor(), $op2$$Register->successor());
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
7752 
// Long compare (EQ/NE flavor): TEQ the high halves, then conditionally TEQ
// the low halves only if the highs were equal — EQ holds iff both match.
instruct compL_reg_reg_EQNE(flagsRegL_EQNE xcc, iRegL op1, iRegL op2) %{
  match(Set xcc (CmpL op1 op2));
  effect( DEF xcc, USE op1, USE op2 );

  size(8);
  format %{ "TEQ    $op1.hi,$op2.hi\t\t! long\n\t"
            "TEQ.eq $op1.lo,$op2.lo" %}
  ins_encode %{
    __ teq($op1$$Register->successor(), $op2$$Register->successor());
    __ teq($op1$$Register, $op2$$Register, eq);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
7766 
// Signed long compare (LE/GT flavor): operands are commuted (op2 - op1) so
// the LE/GT tests can be expressed via the LT/GE-style flag result.
instruct compL_reg_reg_LEGT(flagsRegL_LEGT xcc, iRegL op1, iRegL op2, iRegL tmp) %{
  match(Set xcc (CmpL op1 op2));
  effect( DEF xcc, USE op1, USE op2, TEMP tmp );

  size(8);
  format %{ "SUBS    $tmp,$op2.low,$op1.low\t\t! long\n\t"
            "SBCS    $tmp,$op2.hi,$op1.hi" %}
  ins_encode %{
    __ subs($tmp$$Register, $op2$$Register, $op1$$Register);
    __ sbcs($tmp$$Register->successor(), $op2$$Register->successor(), $op1$$Register->successor());
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
7780 
7781 // TODO: try immLRot2 instead, (0, $con$$constant) becomes
7782 // (hi($con$$constant), lo($con$$constant)) becomes
// Signed long compare against a constant whose high word is zero
// (immLlowRot), LT/GE flavor: subtract the constant from the low word,
// then subtract-with-carry zero from the high word.
instruct compL_reg_con_LTGE(flagsRegL_LTGE xcc, iRegL op1, immLlowRot con, iRegL tmp) %{
  match(Set xcc (CmpL op1 con));
  effect( DEF xcc, USE op1, USE con, TEMP tmp );

  size(8);
  format %{ "SUBS    $tmp,$op1.low,$con\t\t! long\n\t"
            "SBCS    $tmp,$op1.hi,0" %}
  ins_encode %{
    __ subs($tmp$$Register, $op1$$Register, $con$$constant);
    __ sbcs($tmp$$Register->successor(), $op1$$Register->successor(), 0);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}
7797 
7798 // TODO: try immLRot2 instead, (0, $con$$constant) becomes
7799 // (hi($con$$constant), lo($con$$constant)) becomes
// Long compare against an immLlowRot constant, EQ/NE flavor: high word must
// be zero, then the low word must equal the constant (conditional TEQ).
instruct compL_reg_con_EQNE(flagsRegL_EQNE xcc, iRegL op1, immLlowRot con) %{
  match(Set xcc (CmpL op1 con));
  effect( DEF xcc, USE op1, USE con );

  size(8);
  format %{ "TEQ    $op1.hi,0\t\t! long\n\t"
            "TEQ.eq $op1.lo,$con" %}
  ins_encode %{
    __ teq($op1$$Register->successor(), 0);
    __ teq($op1$$Register, $con$$constant, eq);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}
7814 
7815 // TODO: try immLRot2 instead, (0, $con$$constant) becomes
7816 // (hi($con$$constant), lo($con$$constant)) becomes
// Signed long compare against an immLlowRot constant, LE/GT flavor:
// reverse-subtract (con - op1) so LE/GT can use the LT/GE-style flags.
instruct compL_reg_con_LEGT(flagsRegL_LEGT xcc, iRegL op1, immLlowRot con, iRegL tmp) %{
  match(Set xcc (CmpL op1 con));
  effect( DEF xcc, USE op1, USE con, TEMP tmp );

  size(8);
  format %{ "RSBS    $tmp,$op1.low,$con\t\t! long\n\t"
            "RSCS    $tmp,$op1.hi,0" %}
  ins_encode %{
    __ rsbs($tmp$$Register, $op1$$Register, $con$$constant);
    __ rscs($tmp$$Register->successor(), $op1$$Register->successor(), 0);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}
7831 
// Unsigned long compare (EQ/NE flavor): equality is sign-agnostic, so the
// sequence is identical to the signed EQ/NE version.
instruct compUL_reg_reg_EQNE(flagsRegUL_EQNE xcc, iRegL op1, iRegL op2) %{
  match(Set xcc (CmpUL op1 op2));
  effect(DEF xcc, USE op1, USE op2);

  size(8);
  format %{ "TEQ    $op1.hi,$op2.hi\t\t! unsigned long\n\t"
            "TEQ.eq $op1.lo,$op2.lo" %}
  ins_encode %{
    __ teq($op1$$Register->successor(), $op2$$Register->successor());
    __ teq($op1$$Register, $op2$$Register, eq);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
7845 
// Unsigned long compare (LE/GT flavor): commuted subtract (op2 - op1), as
// in the signed LEGT version, with flags read as unsigned conditions.
instruct compUL_reg_reg_LEGT(flagsRegUL_LEGT xcc, iRegL op1, iRegL op2, iRegL tmp) %{
  match(Set xcc (CmpUL op1 op2));
  effect(DEF xcc, USE op1, USE op2, TEMP tmp);

  size(8);
  format %{ "SUBS    $tmp,$op2.low,$op1.low\t\t! unsigned long\n\t"
            "SBCS    $tmp,$op2.hi,$op1.hi" %}
  ins_encode %{
    __ subs($tmp$$Register, $op2$$Register, $op1$$Register);
    __ sbcs($tmp$$Register->successor(), $op2$$Register->successor(), $op1$$Register->successor());
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
7859 
7860 // TODO: try immLRot2 instead, (0, $con$$constant) becomes
7861 // (hi($con$$constant), lo($con$$constant)) becomes
// Unsigned long compare against an immLlowRot constant, LT/GE flavor.
instruct compUL_reg_con_LTGE(flagsRegUL_LTGE xcc, iRegL op1, immLlowRot con, iRegL tmp) %{
  match(Set xcc (CmpUL op1 con));
  effect(DEF xcc, USE op1, USE con, TEMP tmp);

  size(8);
  format %{ "SUBS    $tmp,$op1.low,$con\t\t! unsigned long\n\t"
            "SBCS    $tmp,$op1.hi,0" %}
  ins_encode %{
    __ subs($tmp$$Register, $op1$$Register, $con$$constant);
    __ sbcs($tmp$$Register->successor(), $op1$$Register->successor(), 0);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}
7876 
7877 // TODO: try immLRot2 instead, (0, $con$$constant) becomes
7878 // (hi($con$$constant), lo($con$$constant)) becomes
// Unsigned long compare against an immLlowRot constant, EQ/NE flavor:
// equality is sign-agnostic, so the sequence matches the signed version.
instruct compUL_reg_con_EQNE(flagsRegUL_EQNE xcc, iRegL op1, immLlowRot con) %{
  match(Set xcc (CmpUL op1 con));
  effect(DEF xcc, USE op1, USE con);

  size(8);
  format %{ "TEQ    $op1.hi,0\t\t! unsigned long\n\t"
            "TEQ.eq $op1.lo,$con" %}
  ins_encode %{
    __ teq($op1$$Register->successor(), 0);
    __ teq($op1$$Register, $con$$constant, eq);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}
7893 
7894 // TODO: try immLRot2 instead, (0, $con$$constant) becomes
7895 // (hi($con$$constant), lo($con$$constant)) becomes
// Unsigned long compare against an immLlowRot constant, LE/GT flavor:
// reverse-subtract (con - op1) so LE/GT can use the LT/GE-style flags.
instruct compUL_reg_con_LEGT(flagsRegUL_LEGT xcc, iRegL op1, immLlowRot con, iRegL tmp) %{
  match(Set xcc (CmpUL op1 con));
  effect(DEF xcc, USE op1, USE con, TEMP tmp);

  size(8);
  format %{ "RSBS    $tmp,$op1.low,$con\t\t! unsigned long\n\t"
            "RSCS    $tmp,$op1.hi,0" %}
  ins_encode %{
    __ rsbs($tmp$$Register, $op1$$Register, $con$$constant);
    __ rscs($tmp$$Register->successor(), $op1$$Register->successor(), 0);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}
7910 
7911 /* instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{ */
7912 /*   match(Set xcc (CmpL (AndL op1 op2) zero)); */
7913 /*   ins_encode %{ */
7914 /*     __ stop("testL_reg_reg unimplemented"); */
7915 /*   %} */
7916 /*   ins_pipe(ialu_cconly_reg_reg); */
7917 /* %} */
7918 
7919 /* // useful for checking the alignment of a pointer: */
7920 /* instruct testL_reg_con(flagsRegL xcc, iRegL op1, immLlowRot con, immL0 zero) %{ */
7921 /*   match(Set xcc (CmpL (AndL op1 con) zero)); */
7922 /*   ins_encode %{ */
7923 /*     __ stop("testL_reg_con unimplemented"); */
7924 /*   %} */
7925 /*   ins_pipe(ialu_cconly_reg_reg); */
7926 /* %} */
7927 
// Unsigned int compare against an encodable immediate (0..2^31-1 range
// restricted by aimmU31).
instruct compU_iReg_imm(flagsRegU icc, iRegI op1, aimmU31 op2 ) %{
  match(Set icc (CmpU op1 op2));

  size(4);
  format %{ "cmp_32 $op1,$op2\t! unsigned" %}
  ins_encode %{
    __ cmp_32($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_cconly_reg_imm);
%}
7938 
7939 // Compare Pointers
// Pointer compare, register-register.
instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{
  match(Set pcc (CmpP op1 op2));

  size(4);
  format %{ "CMP    $op1,$op2\t! ptr" %}
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
7950 
// Pointer compare against an immediate. Non-null pointer constants would
// need relocation, which CMP cannot carry — the assert enforces that only
// null (0) or non-relocated constants reach this encoding.
instruct compP_iRegP_imm(flagsRegP pcc, iRegP op1, aimmP op2 ) %{
  match(Set pcc (CmpP op1 op2));

  size(4);
  format %{ "CMP    $op1,$op2\t! ptr" %}
  ins_encode %{
    assert($op2$$constant == 0 || _opnds[2]->constant_reloc() == relocInfo::none, "reloc in cmp?");
    __ cmp($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_cconly_reg_imm);
%}
7962 
7963 //----------Max and Min--------------------------------------------------------
7964 // Min Instructions
7965 // Conditional move for min
// Conditional move used by the MinI expansion: $op2 = $op1 when 'lt'.
// Match-less (expand-only) instruct; op2 is read-modify (USE_DEF).
instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{
  effect( USE_DEF op2, USE op1, USE icc );

  size(4);
  format %{ "MOV.lt  $op2,$op1\t! min" %}
  ins_encode %{
    __ mov($op2$$Register, $op1$$Register, lt);
  %}
  ins_pipe(ialu_reg_flags);
%}
7976 
7977 // Min Register with Register.
// Min Register with Register: expands to a compare followed by a
// conditional move into op2 (which is both input and result).
instruct minI_eReg(iRegI op1, iRegI op2) %{
  match(Set op2 (MinI op1 op2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    flagsReg icc;
    compI_iReg(icc,op1,op2);
    cmovI_reg_lt(op2,op1,icc);
  %}
%}
7987 
7988 // Max Instructions
7989 // Conditional move for max
// Conditional move used by the MaxI expansion: $op2 = $op1 when 'gt'.
// Match-less (expand-only) instruct; op2 is read-modify (USE_DEF).
// Added size(4) for consistency with cmovI_reg_lt — this is the same
// single predicated MOV instruction.
instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{
  effect( USE_DEF op2, USE op1, USE icc );

  size(4);
  format %{ "MOV.gt  $op2,$op1\t! max" %}
  ins_encode %{
    __ mov($op2$$Register, $op1$$Register, gt);
  %}
  ins_pipe(ialu_reg_flags);
%}
7998 
7999 // Max Register with Register
// Max Register with Register: expands to a compare followed by a
// conditional move into op2 (which is both input and result).
instruct maxI_eReg(iRegI op1, iRegI op2) %{
  match(Set op2 (MaxI op1 op2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    flagsReg icc;
    compI_iReg(icc,op1,op2);
    cmovI_reg_gt(op2,op1,icc);
  %}
%}
8009 
8010 
8011 //----------Float Compares----------------------------------------------------
8012 // Compare floating, generate condition code
// Float compare: FCMPs sets the FP status flags, FMSTAT copies them into
// the integer condition flags so ordinary branches can consume them.
instruct cmpF_cc(flagsRegF fcc, flagsReg icc, regF src1, regF src2) %{
  match(Set icc (CmpF src1 src2));
  effect(KILL fcc);

  size(8);
  format %{ "FCMPs  $src1,$src2\n\t"
            "FMSTAT" %}
  ins_encode %{
    __ fcmps($src1$$FloatRegister, $src2$$FloatRegister);
    __ fmstat();
  %}
  ins_pipe(faddF_fcc_reg_reg_zero);
%}
8026 
// Float compare against +0.0: the compare-with-zero form needs no second
// register; FMSTAT copies the FP flags into the integer condition flags.
instruct cmpF0_cc(flagsRegF fcc, flagsReg icc, regF src1, immF0 src2) %{
  match(Set icc (CmpF src1 src2));
  effect(KILL fcc);

  size(8);
  // Format fixed: the encoding emits the compare-with-zero form (fcmpzs),
  // matching the FCMPZd spelling used by cmpD0_cc.
  format %{ "FCMPZs $src1,$src2\n\t"
            "FMSTAT" %}
  ins_encode %{
    __ fcmpzs($src1$$FloatRegister);
    __ fmstat();
  %}
  ins_pipe(faddF_fcc_reg_reg_zero);
%}
8040 
// Double compare: FCMPd sets the FP status flags, FMSTAT copies them into
// the integer condition flags so ordinary branches can consume them.
instruct cmpD_cc(flagsRegF fcc, flagsReg icc, regD src1, regD src2) %{
  match(Set icc (CmpD src1 src2));
  effect(KILL fcc);

  size(8);
  format %{ "FCMPd  $src1,$src2 \n\t"
            "FMSTAT" %}
  ins_encode %{
    __ fcmpd($src1$$FloatRegister, $src2$$FloatRegister);
    __ fmstat();
  %}
  ins_pipe(faddD_fcc_reg_reg_zero);
%}
8054 
// Double compare against +0.0 using the compare-with-zero form (no second
// register needed); FMSTAT moves the FP flags into the integer flags.
instruct cmpD0_cc(flagsRegF fcc, flagsReg icc, regD src1, immD0 src2) %{
  match(Set icc (CmpD src1 src2));
  effect(KILL fcc);

  size(8);
  format %{ "FCMPZd  $src1,$src2 \n\t"
            "FMSTAT" %}
  ins_encode %{
    __ fcmpzd($src1$$FloatRegister);
    __ fmstat();
  %}
  ins_pipe(faddD_fcc_reg_reg_zero);
%}
8068 
8069 // Compare floating, generate -1,0,1
// Compare floating, generate -1,0,1
// CmpF3: materialize the three-way float compare result in an integer
// register. floating_cmp() post-processes FPSCR bits (see format) so the
// integer condition flags are left untouched.
instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF fcc) %{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL fcc);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3); // FIXME
  size(20);
  // same number of instructions as code using conditional moves but
  // doesn't kill integer condition register
  format %{ "FCMPs  $dst,$src1,$src2 \n\t"
            "VMRS   $dst, FPSCR \n\t"
            "OR     $dst, $dst, 0x08000000 \n\t"
            "EOR    $dst, $dst, $dst << 3 \n\t"
            "MOV    $dst, $dst >> 30" %}
  ins_encode %{
    __ fcmps($src1$$FloatRegister, $src2$$FloatRegister);
    __ floating_cmp($dst$$Register);
  %}
  ins_pipe( floating_cmp );
%}
8088 
// CmpF3 against +0.0: same FPSCR post-processing as cmpF_reg but uses the
// compare-with-zero form, leaving the integer condition flags untouched.
instruct cmpF0_reg(iRegI dst, regF src1, immF0 src2, flagsRegF fcc) %{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL fcc);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3); // FIXME
  size(20);
  // same number of instructions as code using conditional moves but
  // doesn't kill integer condition register
  format %{ "FCMPZs $dst,$src1,$src2 \n\t"
            "VMRS   $dst, FPSCR \n\t"
            "OR     $dst, $dst, 0x08000000 \n\t"
            "EOR    $dst, $dst, $dst << 3 \n\t"
            "MOV    $dst, $dst >> 30" %}
  ins_encode %{
    __ fcmpzs($src1$$FloatRegister);
    __ floating_cmp($dst$$Register);
  %}
  ins_pipe( floating_cmp );
%}
8107 
// CmpD3: materialize the three-way double compare result in an integer
// register via FPSCR post-processing; integer condition flags untouched.
instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF fcc) %{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL fcc);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3); // FIXME
  size(20);
  // same number of instructions as code using conditional moves but
  // doesn't kill integer condition register
  format %{ "FCMPd  $dst,$src1,$src2 \n\t"
            "VMRS   $dst, FPSCR \n\t"
            "OR     $dst, $dst, 0x08000000 \n\t"
            "EOR    $dst, $dst, $dst << 3 \n\t"
            "MOV    $dst, $dst >> 30" %}
  ins_encode %{
    __ fcmpd($src1$$FloatRegister, $src2$$FloatRegister);
    __ floating_cmp($dst$$Register);
  %}
  ins_pipe( floating_cmp );
%}
8126 
// CmpD3 against +0.0: compare-with-zero form plus the same FPSCR
// post-processing; integer condition flags untouched.
instruct cmpD0_reg(iRegI dst, regD src1, immD0 src2, flagsRegF fcc) %{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL fcc);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3); // FIXME
  size(20);
  // same number of instructions as code using conditional moves but
  // doesn't kill integer condition register
  format %{ "FCMPZd $dst,$src1,$src2 \n\t"
            "VMRS   $dst, FPSCR \n\t"
            "OR     $dst, $dst, 0x08000000 \n\t"
            "EOR    $dst, $dst, $dst << 3 \n\t"
            "MOV    $dst, $dst >> 30" %}
  ins_encode %{
    __ fcmpzd($src1$$FloatRegister);
    __ floating_cmp($dst$$Register);
  %}
  ins_pipe( floating_cmp );
%}
8145 
8146 //----------Branches---------------------------------------------------------
8147 // Jump
8148 // (compare 'operand indIndex' and 'instruct addP_reg_reg' above)
8149 // FIXME
// Table jump (switch dispatch): load the target address from the constant
// table at [constanttablebase + constantoffset + switch_val] and jump to
// it. Three code shapes depending on how the offset can be encoded.
instruct jumpXtnd(iRegX switch_val, iRegP tmp) %{
  match(Jump switch_val);
  effect(TEMP tmp);
  ins_cost(350);
  format %{  "ADD    $tmp, $constanttablebase, $switch_val\n\t"
             "LDR    $tmp,[$tmp + $constantoffset]\n\t"
             "BX     $tmp" %}
  size(20);
  ins_encode %{
    Register table_reg;
    Register label_reg = $tmp$$Register;
    if (constant_offset() == 0) {
      // No offset: index the table base directly by switch_val.
      table_reg = $constanttablebase;
      __ ldr(label_reg, Address(table_reg, $switch_val$$Register));
    } else {
      table_reg = $tmp$$Register;
      int offset = $constantoffset;
      if (is_memoryP(offset)) {
        // Offset fits a load-immediate addressing mode: add the index
        // first, then load with the immediate offset.
        __ add(table_reg, $constanttablebase, $switch_val$$Register);
        __ ldr(label_reg, Address(table_reg, offset));
      } else {
        // Offset too large to encode: materialize it with mov_slow first.
        __ mov_slow(table_reg, $constantoffset);
        __ add(table_reg, $constanttablebase, table_reg);
        __ ldr(label_reg, Address(table_reg, $switch_val$$Register));
      }
    }
    __ jump(label_reg); // ldr + b better than ldr to PC for branch predictor?
    //    __ ldr(PC, Address($table$$Register, $switch_val$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
8181 
8182 // // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label labl) %{
  match(Goto);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B     $labl" %}
  ins_encode %{
    __ b(*($labl$$label));
  %}
  ins_pipe(br);
%}
8195 
8196 // Conditional Direct Branch
// Conditional branch on integer flags.
instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp   $icc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
8209 
8210 #ifdef ARM
// Conditional branch on a flags result that is only valid for EQ/NE/LT/GE
// (e.g. produced by the tst* instructs); the predicate enforces this.
instruct branchCon_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);
  predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp   $icc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
8224 #endif
8225 
8226 
// Conditional branch on unsigned-compare flags.
instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $icc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
8239 
// Conditional branch on pointer-compare flags.
instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
  match(If cmp pcc);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $pcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
8252 
// Conditional branch on the LT/GE flavor of long-compare flags; the
// predicate restricts matching to lt/ge tests.
instruct branchConL_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
8266 
// Conditional branch on the EQ/NE flavor of long-compare flags; the
// predicate restricts matching to eq/ne tests.
instruct branchConL_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
8280 
// Conditional branch on the LE/GT flavor of long-compare flags: the
// compare was emitted with commuted operands, so cmpOpL_commute maps the
// le/gt test onto the commuted flag result.
instruct branchConL_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le );

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
8294 
// Conditional branch on the LT/GE flavor of unsigned-long-compare flags.
instruct branchConUL_LTGE(cmpOpUL cmp, flagsRegUL_LTGE xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate(_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
8308 
// Conditional branch on the EQ/NE flavor of unsigned-long-compare flags.
instruct branchConUL_EQNE(cmpOpUL cmp, flagsRegUL_EQNE xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate(_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
8322 
// Conditional branch on the LE/GT flavor of unsigned-long-compare flags;
// the compare was emitted commuted, so cmpOpUL_commute remaps the test.
instruct branchConUL_LEGT(cmpOpUL_commute cmp, flagsRegUL_LEGT xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate(_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
8336 
// Back-branch closing a counted loop; same encoding as branchCon.
instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
  match(CountedLoopEnd cmp icc);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp   $icc,$labl\t! Loop end" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
8349 
8350 // instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
8351 //   match(CountedLoopEnd cmp icc);
8352 //   ins_pipe(br_cc);
8353 // %}
8354 
8355 // ============================================================================
8356 // Long Compare
8357 //
8358 // Currently we hold longs in 2 registers.  Comparing such values efficiently
8359 // is tricky.  The flavor of compare used depends on whether we are testing
8360 // for LT, LE, or EQ.  For a simple LT test we can check just the sign bit.
8361 // The GE test is the negated LT test.  The LE test can be had by commuting
8362 // the operands (yielding a GE test) and then negating; negate again for the
8363 // GT test.  The EQ test is done by ORcc'ing the high and low halves, and the
8364 // NE test is negated from that.
8365 
8366 // Due to a shortcoming in the ADLC, it mixes up expressions like:
8367 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)).  Note the
8368 // difference between 'Y' and '0L'.  The tree-matches for the CmpI sections
8369 // are collapsed internally in the ADLC's dfa-gen code.  The match for
8370 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
8371 // foo match ends up with the wrong leaf.  One fix is to not match both
8372 // reg-reg and reg-zero forms of long-compare.  This is unfortunate because
8373 // both forms beat the trinary form of long-compare and both are very useful
8374 // on Intel which has so few registers.
8375 
8376 // instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
8377 //   match(If cmp xcc);
8378 //   ins_pipe(br_cc);
8379 // %}
8380 
8381 // Manifest a CmpL3 result in an integer register.  Very painful.
8382 // This is the test to avoid.
// Manifest a CmpL3 result (-1/0/1) in an integer register. Very painful.
// This is the test to avoid.
// Signed compare of the high words decides unless they are equal, in which
// case an unsigned compare of the low words decides (hi/lo conditions).
instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
  match(Set dst (CmpL3 src1 src2) );
  effect( KILL ccr );
  ins_cost(6*DEFAULT_COST); // FIXME
  size(32);
  format %{
      "CMP    $src1.hi, $src2.hi\t\t! long\n"
    "\tMOV.gt $dst, 1\n"
    "\tmvn.lt $dst, 0\n"
    "\tB.ne   done\n"
    "\tSUBS   $dst, $src1.lo, $src2.lo\n"
    "\tMOV.hi $dst, 1\n"
    "\tmvn.lo $dst, 0\n"
    "done:"     %}
  ins_encode %{
    Label done;
    __ cmp($src1$$Register->successor(), $src2$$Register->successor());
    __ mov($dst$$Register, 1, gt);     // hi words: signed greater -> 1
    __ mvn($dst$$Register, 0, lt);     // hi words: signed less -> -1 (mvn 0)
    __ b(done, ne);                    // hi words unequal: done
    __ subs($dst$$Register, $src1$$Register, $src2$$Register);
    __ mov($dst$$Register, 1, hi);     // lo words: unsigned greater -> 1
    __ mvn($dst$$Register, 0, lo);     // lo words: unsigned less -> -1
    __ bind(done);
  %}
  ins_pipe(cmpL_reg);
%}
8410 
8411 // Conditional move
// Conditional move of a long (both register halves) for lt/ge tests on
// the long-compare flag flavor.
instruct cmovLL_reg_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(150);
  size(8);
  // Format fixed: the second move targets the high half of $dst
  // (the encoding moves src->successor into dst->successor).
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
8426 
// Conditional move of a long (both register halves) for eq/ne tests on
// the long-compare flag flavor.
instruct cmovLL_reg_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(150);
  size(8);
  // Format fixed: the second move targets the high half of $dst.
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
8441 
// Conditional move of a long (both register halves) for le/gt tests; the
// flags came from a commuted compare, so cmpOpL_commute remaps the test.
instruct cmovLL_reg_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(150);
  size(8);
  // Format fixed: the second move targets the high half of $dst.
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
8456 
// Conditionally zero a long (both halves) for lt/ge tests.
instruct cmovLL_imm_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegL dst, immL0 src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );
  ins_cost(140);
  size(8);
  // Format fixed: the second move targets the high half of $dst.
  format %{ "MOV$cmp  $dst.lo,0\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, 0, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
8470 
// Conditional move of immediate zero into a long register under EQ/NE.
// Fix: second format line now prints "$dst.hi" for the hi-word clear.
instruct cmovLL_imm_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegL dst, immL0 src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );
  ins_cost(140);
  size(8);
  format %{ "MOV$cmp  $dst.lo,0\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, 0, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
8484 
// Conditional move of immediate zero into a long register under LE/GT
// (commuted compare).  Fix: second format line now prints "$dst.hi".
instruct cmovLL_imm_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegL dst, immL0 src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );
  ins_cost(140);
  size(8);
  format %{ "MOV$cmp  $dst.lo,0\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, 0, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
8498 
// Conditional move of an int register under LT/GE, flags from a long compare.
instruct cmovIL_reg_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
8511 
// Conditional move of an int register under EQ/NE, flags from a long compare.
instruct cmovIL_reg_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
8524 
// Conditional move of an int register under LE/GT (commuted long compare).
instruct cmovIL_reg_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
8537 
// Conditional MOVW of a 16-bit immediate into an int register under LT/GE.
instruct cmovIL_imm_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
8549 
// Conditional MOVW of a 16-bit immediate into an int register under EQ/NE.
instruct cmovIL_imm_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
8561 
// Conditional MOVW of a 16-bit immediate into an int register under LE/GT.
instruct cmovIL_imm_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
8573 
// Conditional move of a pointer register under LT/GE, flags from a long compare.
instruct cmovPL_reg_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
8586 
// Conditional move of a pointer register under EQ/NE, flags from a long compare.
instruct cmovPL_reg_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
8599 
// Conditional move of a pointer register under LE/GT (commuted long compare).
instruct cmovPL_reg_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
8612 
// Conditional MOVW of null (immP0) into a pointer register under LT/GE.
instruct cmovPL_imm_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
8624 
// Conditional MOVW of null (immP0) into a pointer register under EQ/NE.
instruct cmovPL_imm_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
8636 
// Conditional MOVW of null (immP0) into a pointer register under LE/GT.
instruct cmovPL_imm_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
8648 
// Conditional copy (FCPYS) of a single-float register under LT/GE.
instruct cmovFL_reg_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );
  ins_cost(150);
  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}
8660 
// Conditional copy (FCPYS) of a single-float register under EQ/NE.
instruct cmovFL_reg_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );
  ins_cost(150);
  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}
8672 
// Conditional copy (FCPYS) of a single-float register under LE/GT (commuted).
instruct cmovFL_reg_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );
  ins_cost(150);
  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}
8684 
// Conditional copy (FCPYD) of a double register under LT/GE.
instruct cmovDL_reg_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(150);
  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}
8697 
// Conditional copy (FCPYD) of a double register under EQ/NE.
instruct cmovDL_reg_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(150);
  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}
8710 
// Conditional copy (FCPYD) of a double register under LE/GT (commuted).
instruct cmovDL_reg_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(150);
  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}
8723 
8724 // ============================================================================
8725 // Safepoint Instruction
8726 // rather than KILL R12, it would be better to use any reg as
8727 // TEMP. Can't do that at this point because it crashes the compiler
// Safepoint poll: load from the polling page address in $poll, tagged with a
// poll_type relocation so the VM can identify the faulting PC as a safepoint.
instruct safePoint_poll(iRegP poll, R12RegI tmp, flagsReg icc) %{
  match(SafePoint poll);
  effect(USE poll, KILL tmp, KILL icc);

  size(4);
  format %{ "LDR   $tmp,[$poll]\t! Safepoint: poll for GC" %}
  ins_encode %{
    // Relocation record must precede the load it describes.
    __ relocate(relocInfo::poll_type);
    __ ldr($tmp$$Register, Address($poll$$Register));
  %}
  ins_pipe(loadPollP);
%}
8740 
8741 
8742 // ============================================================================
8743 // Call Instructions
8744 // Call Java Static Instruction
// Static Java call (non-method-handle case; the MH variant is matched below).
instruct CallStaticJavaDirect( method meth ) %{
  match(CallStaticJava);
  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth);

  ins_cost(CALL_COST);
  format %{ "CALL,static ==> " %}
  ins_encode( Java_Static_Call( meth ), call_epilog );
  ins_pipe(simple_call);
%}
8755 
8756 // Call Java Static Instruction (method handle version)
// Static Java call for method-handle invokes; wraps the call with
// preserve_SP/restore_SP (see the FP note below).
instruct CallStaticJavaHandle( method meth ) %{
  match(CallStaticJava);
  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth);
  // FP is saved by all callees (for interpreter stack correction).
  // We use it here for a similar purpose, in {preserve,restore}_FP.

  ins_cost(CALL_COST);
  format %{ "CALL,static/MethodHandle ==> " %}
  ins_encode( preserve_SP, Java_Static_Call( meth ), restore_SP, call_epilog );
  ins_pipe(simple_call);
%}
8769 
8770 // Call Java Dynamic Instruction
// Dynamic (inline-cache) Java call; the encoding loads the IC oop before the call.
instruct CallDynamicJavaDirect( method meth ) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(CALL_COST);
  format %{ "MOV_OOP    (empty),R_R8\n\t"
            "CALL,dynamic  ; NOP ==> " %}
  ins_encode( Java_Dynamic_Call( meth ), call_epilog );
  ins_pipe(call);
%}
8781 
8782 // Call Runtime Instruction
// Call into the VM runtime (with safepoint support).
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime);
  effect(USE meth);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog );
  ins_pipe(simple_call);
%}
8792 
8793 // Call runtime without safepoint - same as CallRuntime
// Leaf runtime call (no safepoint) - encoding identical to CallRuntimeDirect.
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf" %}
  // TODO: need save_last_PC here?
  ins_encode( Java_To_Runtime( meth ),
              call_epilog );
  ins_pipe(simple_call);
%}
8804 
8805 // Call runtime without safepoint - same as CallLeaf
// Leaf runtime call with no FP arguments - encoding identical to CallLeafDirect.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf nofp" %}
  // TODO: need save_last_PC here?
  ins_encode( Java_To_Runtime( meth ),
              call_epilog );
  ins_pipe(simple_call);
%}
8816 
8817 // Tail Call; Jump from runtime stub to Java code.
8818 // Also known as an 'interprocedural jump'.
8819 // Target of jump will eventually return to caller.
8820 // TailJump below removes the return address.
// Indirect tail call: saves LR into Rexception_pc, then jumps to the target;
// the method oop register is carried along for the callee (see format note).
instruct TailCalljmpInd(IPRegP jump_target, inline_cache_regP method_oop) %{
  match(TailCall jump_target method_oop );

  ins_cost(CALL_COST);
  format %{ "MOV    Rexception_pc, LR\n\t"
            "jump   $jump_target  \t! $method_oop holds method oop" %}
  ins_encode %{
    __ mov(Rexception_pc, LR);   // this is used only to call
                                 // StubRoutines::forward_exception_entry()
                                 // which expects PC of exception in
                                 // R5. FIXME?
    __ jump($jump_target$$Register);
  %}
  ins_pipe(tail_call);
%}
8836 
8837 
8838 // Return Instruction
// Method return: branch to the link register.
instruct Ret() %{
  match(Return);

  format %{ "ret LR" %}

  ins_encode %{
    __ ret(LR);
  %}

  ins_pipe(br);
%}
8850 
8851 
8852 // Tail Jump; remove the return address; jump to target.
8853 // TailCall above leaves the return address around.
8854 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in the exception register at the jump.
// (The %o0/%i0 names in the original comment were SPARC leftovers; on ARM
// the exception oop lives in Rexception_obj -- see CreateException below.)
// Indirect tail jump used by the rethrow stub: save LR as the exception PC,
// then jump; the exception oop travels in the dedicated exception register.
instruct tailjmpInd(IPRegP jump_target, RExceptionRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(CALL_COST);
  format %{ "MOV    Rexception_pc, LR\n\t"
            "jump   $jump_target \t! $ex_oop holds exc. oop" %}
  ins_encode %{
    __ mov(Rexception_pc, LR);
    __ jump($jump_target$$Register);
  %}
  ins_pipe(tail_call);
%}
8869 
8870 // Create exception oop: created by stack-crawling runtime code.
8871 // Created exception is now available to this handler, and is setup
8872 // just prior to jumping to this handler.  No code emitted.
// Create exception oop: created by stack-crawling runtime code; the oop is
// already in the exception register when this handler is entered.
instruct CreateException( RExceptionRegP ex_oop )
%{
  match(Set ex_oop (CreateEx));
  ins_cost(0);

  size(0);
  // use the following format syntax
  format %{ "! exception oop is in Rexception_obj; no code emitted" %}
  ins_encode();
  ins_pipe(empty);
%}
8884 
8885 
8886 // Rethrow exception:
8887 // The exception oop will come in the first argument position.
8888 // Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the rethrow stub; exception oop arrives in the
// first argument register.
instruct RethrowException()
%{
  match(Rethrow);
  ins_cost(CALL_COST);

  // use the following format syntax
  format %{ "b    rethrow_stub" %}
  ins_encode %{
    // Scratch register for the far-jump sequence; must not clobber the
    // outgoing argument (c_rarg0) or LR.
    Register scratch = R1_tmp;
    assert_different_registers(scratch, c_rarg0, LR);
    __ jump(OptoRuntime::rethrow_stub(), relocInfo::runtime_call_type, scratch);
  %}
  ins_pipe(tail_call);
%}
8903 
8904 
8905 // Die now
// Halt: emit a permanently-undefined instruction (UDF #0xdead) to trap.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(CALL_COST);

  size(4);
  // Use the following format syntax
  format %{ "ShouldNotReachHere" %}
  ins_encode %{
    __ udf(0xdead);
  %}
  ins_pipe(tail_call);
%}
8919 
8920 // ============================================================================
8921 // The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary superklass
8922 // array for an instance of the superklass.  Set a hidden internal cache on a
8923 // hit (cache is checked with exposed code in gen_subtype_check()).  Return
8924 // not zero for a miss or zero for a hit.  The encoding ALSO sets flags.
// Slow-path subtype check via a stub call; operands are pinned to R0-R2 and
// the call clobbers LR and the flags (declared via KILL effects).
instruct partialSubtypeCheck( R0RegP index, R1RegP sub, R2RegP super, flagsRegP pcc, LRRegP lr ) %{
  match(Set index (PartialSubtypeCheck sub super));
  effect( KILL pcc, KILL lr );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL   PartialSubtypeCheck" %}
  ins_encode %{
    __ call(StubRoutines::Arm::partial_subtype_check(), relocInfo::runtime_call_type);
  %}
  ins_pipe(partial_subtype_check_pipe);
%}
8935 
8936 /* instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{ */
8937 /*   match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero)); */
8938 /*   ins_pipe(partial_subtype_check_pipe); */
8939 /* %} */
8940 
8941 
8942 // ============================================================================
8943 // inlined locking and unlocking
8944 
// Inlined monitor enter; used when biased-locking inlining is handled by C2
// (predicate is the complement of cmpFastLock_noBiasInline below).
instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch )
%{
  match(Set pcc (FastLock object box));
  predicate(!(UseBiasedLocking && !UseOptoBiasInlining));

  effect(TEMP scratch, TEMP scratch2);
  ins_cost(DEFAULT_COST*3);

  format %{ "FASTLOCK  $object, $box; KILL $scratch, $scratch2" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $scratch$$Register, $scratch2$$Register);
  %}
  ins_pipe(long_memory_op);
%}
8959 
// Inlined monitor enter when biased locking is on but not inlined by C2;
// needs a third scratch register for the biased-locking path.
instruct cmpFastLock_noBiasInline(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2,
                                  iRegP scratch, iRegP scratch3) %{
  match(Set pcc (FastLock object box));
  predicate(UseBiasedLocking && !UseOptoBiasInlining);

  effect(TEMP scratch, TEMP scratch2, TEMP scratch3);
  ins_cost(DEFAULT_COST*5);

  format %{ "FASTLOCK  $object, $box; KILL $scratch, $scratch2, $scratch3" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $scratch$$Register, $scratch2$$Register, $scratch3$$Register);
  %}
  ins_pipe(long_memory_op);
%}
8974 
8975 
// Inlined monitor exit; flags result in pcc, two scratch registers clobbered.
instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch ) %{
  match(Set pcc (FastUnlock object box));
  effect(TEMP scratch, TEMP scratch2);
  ins_cost(100);

  format %{ "FASTUNLOCK  $object, $box; KILL $scratch, $scratch2" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $scratch$$Register, $scratch2$$Register);
  %}
  ins_pipe(long_memory_op);
%}
8987 
8988 // Count and Base registers are fixed because the allocator cannot
8989 // kill unknown registers.  The encodings are generic.
// Zero an array: count $temp down from $cnt in 4-byte steps, storing $zero at
// [$base + $temp] while the count is still >= 0.
// Fixes: `Label(loop);` (a parenthesized declaration of `loop`) is now the
// conventional `Label loop;`; the format string was missing the "\n\t" between
// the STR and B lines, and its "! delay slot" annotation was a SPARC leftover
// (ARM has no branch delay slots).
instruct clear_array(iRegX cnt, iRegP base, iRegI temp, iRegX zero, Universe dummy, flagsReg cpsr) %{
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, TEMP zero, KILL cpsr);
  ins_cost(300);
  format %{ "MOV    $zero,0\n"
      "        MOV    $temp,$cnt\n"
      "loop:   SUBS   $temp,$temp,4\t! Count down a dword of bytes\n"
      "        STR.ge $zero,[$base+$temp]\n"
      "        B.gt   loop\t\t! Clearing loop\n" %}
  ins_encode %{
    __ mov($zero$$Register, 0);
    __ mov($temp$$Register, $cnt$$Register);
    Label loop;
    __ bind(loop);
    // SUBS sets flags: store while temp >= 0, keep looping while temp > 0.
    __ subs($temp$$Register, $temp$$Register, 4);
    __ str($zero$$Register, Address($base$$Register, $temp$$Register), ge);
    __ b(loop, gt);
  %}
  ins_pipe(long_memory_op);
%}
9010 
#ifdef XXX
// NOTE(review): 'XXX' does not appear to be defined, so the string/array
// intrinsics below are compiled out -- confirm whether they are intended to
// be re-enabled or removed.
// FIXME: Why R0/R1/R2/R3?
instruct string_compare(R0RegP str1, R1RegP str2, R2RegI cnt1, R3RegI cnt2, iRegI result,
                        iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
  predicate(!CompactStrings);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, TEMP tmp1, TEMP tmp2);
  ins_cost(300);
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   // TEMP $tmp1, $tmp2" %}
  ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result, tmp1, tmp2) );

  ins_pipe(long_memory_op);
%}

// FIXME: Why R0/R1/R2?
instruct string_equals(R0RegP str1, R1RegP str2, R2RegI cnt, iRegI result, iRegI tmp1, iRegI tmp2,
                       flagsReg ccr) %{
  predicate(!CompactStrings);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP tmp1, TEMP tmp2, TEMP result, KILL ccr);

  ins_cost(300);
  format %{ "String Equals $str1,$str2,$cnt -> $result   // TEMP $tmp1, $tmp2" %}
  ins_encode( enc_String_Equals(str1, str2, cnt, result, tmp1, tmp2) );
  ins_pipe(long_memory_op);
%}

// FIXME: Why R0/R1?
instruct array_equals(R0RegP ary1, R1RegP ary2, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI result,
                      flagsReg ccr) %{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP result, KILL ccr);

  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result   // TEMP $tmp1,$tmp2,$tmp3" %}
  ins_encode( enc_Array_Equals(ary1, ary2, tmp1, tmp2, tmp3, result));
  ins_pipe(long_memory_op);
%}
#endif
9051 
9052 //---------- Zeros Count Instructions ------------------------------------------
9053 
// Count leading zeros of a 32-bit int with a single CLZ.
instruct countLeadingZerosI(iRegI dst, iRegI src) %{
  match(Set dst (CountLeadingZerosI src));
  size(4);
  format %{ "CLZ_32 $dst,$src" %}
  ins_encode %{
    __ clz_32($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
9063 
// Count leading zeros of a 64-bit long: CLZ the hi word; if it is all zeros
// (CLZ result == 32), add the CLZ of the lo word.
instruct countLeadingZerosL(iRegI dst, iRegL src, iRegI tmp, flagsReg ccr) %{
  match(Set dst (CountLeadingZerosL src));
  effect(TEMP tmp, TEMP dst, KILL ccr);
  size(16);
  format %{ "CLZ    $dst,$src.hi\n\t"
            "TEQ    $dst,32\n\t"
            "CLZ.eq $tmp,$src.lo\n\t"
            "ADD.eq $dst, $dst, $tmp\n\t" %}
  ins_encode %{
    // successor() is the hi word of the long register pair.
    __ clz($dst$$Register, $src$$Register->successor());
    __ teq($dst$$Register, 32);
    __ clz($tmp$$Register, $src$$Register, eq);
    __ add($dst$$Register, $dst$$Register, $tmp$$Register, eq);
  %}
  ins_pipe(ialu_reg);
%}
9080 
// Count trailing zeros of an int: bit-reverse (RBIT) then count leading zeros.
instruct countTrailingZerosI(iRegI dst, iRegI src, iRegI tmp) %{
  match(Set dst (CountTrailingZerosI src));
  effect(TEMP tmp);
  size(8);
  format %{ "RBIT_32 $tmp, $src\n\t"
            "CLZ_32  $dst,$tmp" %}
  ins_encode %{
    __ rbit_32($tmp$$Register, $src$$Register);
    __ clz_32($dst$$Register, $tmp$$Register);
  %}
  ins_pipe(ialu_reg);
%}
9093 
// Count trailing zeros of a long: RBIT+CLZ the lo word; if it is all zeros
// (count == 32), add RBIT+CLZ of the hi word.
instruct countTrailingZerosL(iRegI dst, iRegL src, iRegI tmp, flagsReg ccr) %{
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP tmp, TEMP dst, KILL ccr);
  size(24);
  format %{ "RBIT   $tmp,$src.lo\n\t"
            "CLZ    $dst,$tmp\n\t"
            "TEQ    $dst,32\n\t"
            "RBIT   $tmp,$src.hi\n\t"
            "CLZ.eq $tmp,$tmp\n\t"
            "ADD.eq $dst,$dst,$tmp\n\t" %}
  ins_encode %{
    __ rbit($tmp$$Register, $src$$Register);
    __ clz($dst$$Register, $tmp$$Register);
    __ teq($dst$$Register, 32);
    // successor() is the hi word of the long register pair.
    __ rbit($tmp$$Register, $src$$Register->successor());
    __ clz($tmp$$Register, $tmp$$Register, eq);
    __ add($dst$$Register, $dst$$Register, $tmp$$Register, eq);
  %}
  ins_pipe(ialu_reg);
%}
9114 
9115 
9116 //---------- Population Count Instructions -------------------------------------
9117 
// Population count of an int via NEON: move to an FP register, VCNT counts
// bits per byte, two widening pairwise adds (VPADDL) reduce to one sum.
instruct popCountI(iRegI dst, iRegI src, regD_low tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);

  format %{ "FMSR       $tmp,$src\n\t"
            "VCNT.8     $tmp,$tmp\n\t"
            "VPADDL.U8  $tmp,$tmp\n\t"
            "VPADDL.U16 $tmp,$tmp\n\t"
            "FMRS       $dst,$tmp" %}
  size(20);

  ins_encode %{
    __ fmsr($tmp$$FloatRegister, $src$$Register);
    __ vcnt($tmp$$FloatRegister, $tmp$$FloatRegister);
    __ vpaddl($tmp$$FloatRegister, $tmp$$FloatRegister, 8, 0);
    __ vpaddl($tmp$$FloatRegister, $tmp$$FloatRegister, 16, 0);
    __ fmrs($dst$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
9139 
9140 // Note: Long.bitCount(long) returns an int.
// Note: Long.bitCount(long) returns an int.
// Same NEON scheme as popCountI with one extra VPADDL stage for 64 bits.
instruct popCountL(iRegI dst, iRegL src, regD_low tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);

  format %{ "FMDRR       $tmp,$src.lo,$src.hi\n\t"
            "VCNT.8      $tmp,$tmp\n\t"
            "VPADDL.U8   $tmp,$tmp\n\t"
            "VPADDL.U16  $tmp,$tmp\n\t"
            "VPADDL.U32  $tmp,$tmp\n\t"
            "FMRS        $dst,$tmp" %}

  size(32);

  ins_encode %{
    // FMDRR packs lo and hi (successor()) words into one double register.
    __ fmdrr($tmp$$FloatRegister, $src$$Register, $src$$Register->successor());
    __ vcnt($tmp$$FloatRegister, $tmp$$FloatRegister);
    __ vpaddl($tmp$$FloatRegister, $tmp$$FloatRegister, 8, 0);
    __ vpaddl($tmp$$FloatRegister, $tmp$$FloatRegister, 16, 0);
    __ vpaddl($tmp$$FloatRegister, $tmp$$FloatRegister, 32, 0);
    __ fmrs($dst$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(ialu_reg);
%}
9165 
9166 
9167 // ============================================================================
9168 //------------Bytes reverse--------------------------------------------------
9169 
// Byte-swap a 32-bit int with REV.
instruct bytes_reverse_int(iRegI dst, iRegI src) %{
  match(Set dst (ReverseBytesI src));

  size(4);
  format %{ "REV32 $dst,$src" %}
  ins_encode %{
    __ rev($dst$$Register, $src$$Register);
  %}
  ins_pipe( iload_mem ); // FIXME
%}
9180 
// Byte-swap a 64-bit long: REV each word AND swap the words (hi <-> lo).
// TEMP dst keeps dst from being allocated over src, which the cross-copy needs.
instruct bytes_reverse_long(iRegL dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));
  effect(TEMP dst);
  size(8);
  format %{ "REV $dst.lo,$src.lo\n\t"
            "REV $dst.hi,$src.hi" %}
  ins_encode %{
    __ rev($dst$$Register, $src$$Register->successor());
    __ rev($dst$$Register->successor(), $src$$Register);
  %}
  ins_pipe( iload_mem ); // FIXME
%}
9193 
// Byte-swap an unsigned 16-bit value with REV16.
instruct bytes_reverse_unsigned_short(iRegI dst, iRegI src) %{
  match(Set dst (ReverseBytesUS src));
  size(4);
  format %{ "REV16 $dst,$src" %}
  ins_encode %{
    __ rev16($dst$$Register, $src$$Register);
  %}
  ins_pipe( iload_mem ); // FIXME
%}
9203 
// Byte-swap a signed 16-bit value with REVSH (sign-extending variant).
instruct bytes_reverse_short(iRegI dst, iRegI src) %{
  match(Set dst (ReverseBytesS src));
  size(4);
  format %{ "REVSH $dst,$src" %}
  ins_encode %{
    __ revsh($dst$$Register, $src$$Register);
  %}
  ins_pipe( iload_mem ); // FIXME
%}
9213 
9214 
9215 // ====================VECTOR INSTRUCTIONS=====================================
9216 
9217 // Load Aligned Packed values into a Double Register
// Load an 8-byte vector into a double FP register with FLDD.
instruct loadV8(vecD dst, memoryD mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "FLDD   $mem,$dst\t! load vector (8 bytes)" %}
  ins_encode %{
    __ ldr_double($dst$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(floadD_mem);
%}
9229 
9230 // Load Aligned Packed values into a Double Register Pair
// Load a 16-byte vector into a double-register pair with VLD1.
instruct loadV16(vecX dst, memoryvld mem) %{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "VLD1   $mem,$dst.Q\t! load vector (16 bytes)" %}
  ins_encode %{
    __ vld1($dst$$FloatRegister, $mem$$Address, MacroAssembler::VELEM_SIZE_16, 128);
  %}
  ins_pipe(floadD_mem); // FIXME
%}
9242 
9243 // Store Vector in Double register to memory
// Store an 8-byte vector from a double FP register with FSTD.
instruct storeV8(memoryD mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "FSTD   $src,$mem\t! store vector (8 bytes)" %}
  ins_encode %{
    __ str_double($src$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_reg);
%}
9255 
9256 // Store Vector in Double Register Pair to memory
// Store a 16-byte vector from a double-register pair with VST1.
instruct storeV16(memoryvld mem, vecX src) %{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "VST1   $src,$mem\t! store vector (16 bytes)" %}
  ins_encode %{
    __ vst1($src$$FloatRegister, $mem$$Address, MacroAssembler::VELEM_SIZE_16, 128);
  %}
  ins_pipe(fstoreD_mem_reg); // FIXME
%}
9268 
9269 // Replicate scalar to packed byte values in Double register
// Replicate a byte across an 8-byte vector without SIMD: smear the low byte
// of $src across all four lanes of $tmp, then pack it twice with FMDRR.
instruct Repl8B_reg(vecD dst, iRegI src, iRegI tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(DEFAULT_COST*4);
  effect(TEMP tmp);
  size(16);

  // FIXME: could use PKH instruction instead?
  format %{ "LSL      $tmp, $src, 24 \n\t"
            "OR       $tmp, $tmp, ($tmp >> 8) \n\t"
            "OR       $tmp, $tmp, ($tmp >> 16) \n\t"
            "FMDRR    $dst,$tmp,$tmp\t" %}
  ins_encode %{
    // Shift the byte to the top, then OR in shifted copies to fill the word.
    __ mov($tmp$$Register, AsmOperand($src$$Register, lsl, 24));
    __ orr($tmp$$Register, $tmp$$Register, AsmOperand($tmp$$Register, lsr, 8));
    __ orr($tmp$$Register, $tmp$$Register, AsmOperand($tmp$$Register, lsr, 16));
    __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
9290 
9291 // Replicate scalar to packed byte values in Double register
9292 instruct Repl8B_reg_simd(vecD dst, iRegI src) %{
9293   predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
9294   match(Set dst (ReplicateB src));
9295   size(4);
9296 
9297   format %{ "VDUP.8 $dst,$src\t" %}
9298   ins_encode %{
9299     bool quad = false;
9300     __ vdupI($dst$$FloatRegister, $src$$Register,
9301              MacroAssembler::VELEM_SIZE_8, quad);
9302   %}
9303   ins_pipe(ialu_reg); // FIXME
9304 %}
9305 
9306 // Replicate scalar to packed byte values in Double register pair
9307 instruct Repl16B_reg(vecX dst, iRegI src) %{
9308   predicate(n->as_Vector()->length_in_bytes() == 16);
9309   match(Set dst (ReplicateB src));
9310   size(4);
9311 
9312   format %{ "VDUP.8 $dst.Q,$src\t" %}
9313   ins_encode %{
9314     bool quad = true;
9315     __ vdupI($dst$$FloatRegister, $src$$Register,
9316              MacroAssembler::VELEM_SIZE_8, quad);
9317   %}
9318   ins_pipe(ialu_reg); // FIXME
9319 %}
9320 
9321 // Replicate scalar constant to packed byte values in Double register
9322 instruct Repl8B_immI(vecD dst, immI src, iRegI tmp) %{
9323   predicate(n->as_Vector()->length() == 8);
9324   match(Set dst (ReplicateB src));
9325   ins_cost(DEFAULT_COST*2);
9326   effect(TEMP tmp);
9327   size(12);
9328 
9329   format %{ "MOV      $tmp, Repl4($src))\n\t"
9330             "FMDRR    $dst,$tmp,$tmp\t" %}
9331   ins_encode( LdReplImmI(src, dst, tmp, (4), (1)) );
9332   ins_pipe(loadConFD); // FIXME
9333 %}
9334 
9335 // Replicate scalar constant to packed byte values in Double register
9336 // TODO: support negative constants with MVNI?
9337 instruct Repl8B_immU8(vecD dst, immU8 src) %{
9338   predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
9339   match(Set dst (ReplicateB src));
9340   size(4);
9341 
9342   format %{ "VMOV.U8  $dst,$src" %}
9343   ins_encode %{
9344     bool quad = false;
9345     __ vmovI($dst$$FloatRegister, $src$$constant,
9346              MacroAssembler::VELEM_SIZE_8, quad);
9347   %}
9348   ins_pipe(loadConFD); // FIXME
9349 %}
9350 
9351 // Replicate scalar constant to packed byte values in Double register pair
9352 instruct Repl16B_immU8(vecX dst, immU8 src) %{
9353   predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
9354   match(Set dst (ReplicateB src));
9355   size(4);
9356 
9357   format %{ "VMOV.U8  $dst.Q,$src" %}
9358   ins_encode %{
9359     bool quad = true;
9360     __ vmovI($dst$$FloatRegister, $src$$constant,
9361              MacroAssembler::VELEM_SIZE_8, quad);
9362   %}
9363   ins_pipe(loadConFD); // FIXME
9364 %}
9365 
// Replicate scalar to packed short/char values into Double register
// Non-SIMD path: smear the low halfword across both halves of an integer
// temp, then copy that word into both 32-bit halves of the D register.
instruct Repl4S_reg(vecD dst, iRegI src, iRegI tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(DEFAULT_COST*3);
  effect(TEMP tmp);
  size(12);  // three 32-bit instructions

  // FIXME: could use PKH instruction instead?
  format %{ "LSL      $tmp, $src, 16 \n\t"
            "OR       $tmp, $tmp, ($tmp >> 16) \n\t"
            "FMDRR    $dst,$tmp,$tmp\t" %}
  ins_encode %{
    // Put the low halfword of $src into the top halfword of $tmp,
    // then OR it back into the low halfword: $tmp holds the value twice.
    __ mov($tmp$$Register, AsmOperand($src$$Register, lsl, 16));
    __ orr($tmp$$Register, $tmp$$Register, AsmOperand($tmp$$Register, lsr, 16));
    __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Replicate scalar to packed short/char values in Double register
// SIMD path: a single VDUP.16 into a D register.
instruct Repl4S_reg_simd(vecD dst, iRegI src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateS src));
  size(4);

  format %{ "VDUP.16 $dst,$src\t" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vdupI($dst$$FloatRegister, $src$$Register,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Replicate scalar to packed short/char values in Double register pair
// Quad (128-bit) VDUP.16.
instruct Repl8S_reg(vecX dst, iRegI src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (ReplicateS src));
  size(4);

  format %{ "VDUP.16 $dst.Q,$src\t" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vdupI($dst$$FloatRegister, $src$$Register,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}


// Replicate scalar constant to packed short/char values in Double register
// Materializes the constant replicated 2x in $tmp, then FMDRRs it
// (see the LdReplImmI encoding class).
// NOTE(review): tmp is iRegP here but iRegI in the sibling Repl8B_immI /
// Repl2I_immI rules -- presumably equivalent for a scratch register; confirm.
instruct Repl4S_immI(vecD dst, immI src, iRegP tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  effect(TEMP tmp);
  size(12);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "MOV      $tmp, Repl2($src))\n\t"
            "FMDRR    $dst,$tmp,$tmp\t" %}
  ins_encode( LdReplImmI(src, dst, tmp, (2), (2)) );
  ins_pipe(loadConFD); // FIXME
%}

// Replicate scalar constant to packed short/char values in Double register
// SIMD immediate form; constant must fit in 8 unsigned bits (immU8).
instruct Repl4S_immU8(vecD dst, immU8 src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateS src));
  size(4);

  format %{ "VMOV.U16  $dst,$src" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vmovI($dst$$FloatRegister, $src$$constant,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe(loadConFD); // FIXME
%}

// Replicate scalar constant to packed short/char values in Double register pair
// Quad (128-bit) VMOV immediate form.
instruct Repl8S_immU8(vecX dst, immU8 src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (ReplicateS src));
  size(4);

  format %{ "VMOV.U16  $dst.Q,$src" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vmovI($dst$$FloatRegister, $src$$constant,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe(loadConFD); // FIXME
%}
9460 
// Replicate scalar to packed int values in Double register
// Non-SIMD path: one FMDRR writes $src into both 32-bit halves.
instruct Repl2I_reg(vecD dst, iRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  size(4);

  format %{ "FMDRR    $dst,$src,$src\t" %}
  ins_encode %{
    __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Replicate scalar to packed int values in Double register pair
// Non-SIMD path: two FMDRRs fill both D registers of the pair.
instruct Repl4I_reg(vecX dst, iRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(DEFAULT_COST*2);
  size(8);  // two 32-bit instructions

  format %{ "FMDRR    $dst.lo,$src,$src\n\t"
            "FMDRR    $dst.hi,$src,$src" %}

  ins_encode %{
    __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register);
    // successor()->successor() steps over the two S-register halves to
    // reach the second D register of the pair (the $dst.hi of the format).
    __ fmdrr($dst$$FloatRegister->successor()->successor(),
             $src$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Replicate scalar to packed int values in Double register
// SIMD path: a single VDUP.32 into a D register.
instruct Repl2I_reg_simd(vecD dst, iRegI src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateI src));
  size(4);

  format %{ "VDUP.32 $dst.D,$src\t" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vdupI($dst$$FloatRegister, $src$$Register,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Replicate scalar to packed int values in Double register pair
// Quad (128-bit) VDUP.32.
instruct Repl4I_reg_simd(vecX dst, iRegI src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (ReplicateI src));
  size(4);

  format %{ "VDUP.32 $dst.Q,$src\t" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vdupI($dst$$FloatRegister, $src$$Register,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}


// Replicate scalar constant to packed int values in Double register
// (Operand is a general immI, not just zero; materialized via LdReplImmI.)
instruct Repl2I_immI(vecD dst, immI src, iRegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  effect(TEMP tmp);
  size(12);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "MOV      $tmp, Repl1($src))\n\t"
            "FMDRR    $dst,$tmp,$tmp\t" %}
  ins_encode( LdReplImmI(src, dst, tmp, (1), (4)) );
  ins_pipe(loadConFD); // FIXME
%}

// Replicate scalar constant to packed int values in Double register
// SIMD immediate form; constant must fit in 8 unsigned bits (immU8).
instruct Repl2I_immU8(vecD dst, immU8 src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateI src));
  size(4);

  format %{ "VMOV.I32  $dst.D,$src" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vmovI($dst$$FloatRegister, $src$$constant,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe(loadConFD); // FIXME
%}

// Replicate scalar constant to packed int values in Double register pair
// Quad (128-bit) VMOV immediate form.
instruct Repl4I_immU8(vecX dst, immU8 src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (ReplicateI src));
  size(4);

  format %{ "VMOV.I32  $dst.Q,$src" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vmovI($dst$$FloatRegister, $src$$constant,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe(loadConFD); // FIXME
%}
9566 
// Replicate scalar to packed long values in Double register pair
// (The 64-bit source register pair is copied into both D registers.)
instruct Repl2L_reg(vecX dst, iRegL src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  size(8);  // two 32-bit instructions
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FMDRR $dst.D,$src.lo,$src.hi\t\n"
            "FMDRR $dst.D.next,$src.lo,$src.hi" %}
  ins_encode %{
    // $src$$Register / its successor are the lo/hi halves of the long.
    __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register->successor());
    // Advance past the two S-register halves to the second D register.
    __ fmdrr($dst$$FloatRegister->successor()->successor(),
             $src$$Register, $src$$Register->successor());
  %}
  ins_pipe(ialu_reg); // FIXME
%}
9583 
9584 
// Replicate scalar to packed float values in Double register
// Takes the float bits in an integer register; one FMDRR writes them
// into both 32-bit halves of the D register.
instruct Repl2F_regI(vecD dst, iRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  size(4);

  format %{ "FMDRR    $dst.D,$src,$src\t" %}
  ins_encode %{
    __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Replicate scalar to packed float values in Double register
// Non-SIMD path: expand into a float->int move plus Repl2F_regI above.
instruct Repl2F_reg_vfp(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  expand %{
    iRegI tmp;
    MoveF2I_reg_reg(tmp, src);
    Repl2F_regI(dst,tmp);
  %}
%}

// Replicate scalar to packed float values in Double register
// SIMD path: a single VDUP.32 from the FP register.
instruct Repl2F_reg_simd(vecD dst, regF src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateF src));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME

  format %{ "VDUP.32  $dst.D,$src.D\t" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vdupF($dst$$FloatRegister, $src$$FloatRegister, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Replicate scalar to packed float values in Double register pair
// Non-SIMD path: pull the float bits into $tmp, then fill both D
// registers of the pair with two FMDRRs.
instruct Repl4F_reg(vecX dst, regF src, iRegI tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  effect(TEMP tmp);
  size(4*3);
  ins_cost(DEFAULT_COST*3); // FIXME

  format %{ "FMRS     $tmp,$src\n\t"
            "FMDRR    $dst.D,$tmp,$tmp\n\t"
            "FMDRR    $dst.D.next,$tmp,$tmp\t" %}
  ins_encode %{
    __ fmrs($tmp$$Register, $src$$FloatRegister);
    __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
    // Advance past the two S-register halves to the second D register.
    __ fmdrr($dst$$FloatRegister->successor()->successor(),
             $tmp$$Register, $tmp$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Replicate scalar to packed float values in Double register pair
// SIMD path: a single quad VDUP.32.
instruct Repl4F_reg_simd(vecX dst, regF src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (ReplicateF src));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME

  format %{ "VDUP.32  $dst.Q,$src.D\t" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vdupF($dst$$FloatRegister, $src$$FloatRegister, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Replicate scalar constant to packed float values in Double register
// (Operand is a general immF, not just zero; materialized via LdReplImmF.)
instruct Repl2F_immI(vecD dst, immF src, iRegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  effect(TEMP tmp);
  size(12);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "MOV      $tmp, Repl1($src))\n\t"
            "FMDRR    $dst,$tmp,$tmp\t" %}
  ins_encode( LdReplImmF(src, dst, tmp) );
  ins_pipe(loadConFD); // FIXME
%}
9675 
// Replicate scalar to packed double float values in Double register pair
// Two FCPYDs copy the source double into both D registers of the pair.
instruct Repl2D_reg(vecX dst, regD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  size(4*2);  // two 32-bit instructions
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FCPYD    $dst.D.a,$src\n\t"
            "FCPYD    $dst.D.b,$src\t" %}
  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    __ fcpyd(dsta, src);
    // Advance past the two S-register halves to the second D register.
    FloatRegister dstb = dsta->successor()->successor();
    __ fcpyd(dstb, src);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
9694 
9695 // ====================VECTOR ARITHMETIC=======================================
9696 
9697 // --------------------------------- ADD --------------------------------------
9698 
// Bytes vector add
// 8x byte add in a D register (NEON VADD.I8).
instruct vadd8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  format %{ "VADD.I8 $dst,$src1,$src2\t! add packed8B" %}
  size(4);
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// 16x byte add in a Q register.
instruct vadd16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  size(4);
  format %{ "VADD.I8 $dst.Q,$src1.Q,$src2.Q\t! add packed16B" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// Shorts/Chars vector add
// 4x halfword add in a D register (NEON VADD.I16).
instruct vadd4S_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  size(4);
  format %{ "VADD.I16 $dst,$src1,$src2\t! add packed4S" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// 8x halfword add in a Q register.
instruct vadd8S_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  size(4);
  format %{ "VADD.I16 $dst.Q,$src1.Q,$src2.Q\t! add packed8S" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// Integers vector add
// 2x word add in a D register (NEON VADD.I32).
instruct vadd2I_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  size(4);
  format %{ "VADD.I32 $dst.D,$src1.D,$src2.D\t! add packed2I" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// 4x word add in a Q register.
instruct vadd4I_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  size(4);
  format %{ "VADD.I32 $dst.Q,$src1.Q,$src2.Q\t! add packed4I" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// Longs vector add
// 2x doubleword add in a Q register (NEON VADD.I64).
instruct vadd2L_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  size(4);
  format %{ "VADD.I64 $dst.Q,$src1.Q,$src2.Q\t! add packed2L" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_64, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
9793 
// Floats vector add
// NEON path: only used when SIMD FP math is IEEE-compliant on this CPU.
instruct vadd2F_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::simd_math_is_compliant());
  match(Set dst (AddVF src1 src2));
  size(4);
  format %{ "VADD.F32 $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vaddF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}
  ins_pipe( faddD_reg_reg ); // FIXME
%}

// VFP fallback: two scalar FADDS on consecutive S registers.
instruct vadd2F_reg_vfp(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && !VM_Version::simd_math_is_compliant());
  match(Set dst (AddVF src1 src2));
  ins_cost(DEFAULT_COST*2); // FIXME

  size(4*2);
  format %{ "FADDS  $dst.a,$src1.a,$src2.a\n\t"
            "FADDS  $dst.b,$src1.b,$src2.b" %}
  ins_encode %{
    __ add_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
    // successor() is the second S-register half of each D operand.
    __ add_float($dst$$FloatRegister->successor(),
             $src1$$FloatRegister->successor(),
             $src2$$FloatRegister->successor());
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}

// NEON path for 4 floats in a Q register.
instruct vadd4F_reg_simd(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::simd_math_is_compliant());
  match(Set dst (AddVF src1 src2));
  size(4);
  format %{ "VADD.F32 $dst.Q,$src1.Q,$src2.Q\t! add packed4F" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vaddF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}
  ins_pipe( faddD_reg_reg ); // FIXME
%}

// VFP fallback: four scalar FADDS on consecutive S registers.
instruct vadd4F_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && !VM_Version::simd_math_is_compliant());
  match(Set dst (AddVF src1 src2));
  size(4*4);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "FADDS  $dst.a,$src1.a,$src2.a\n\t"
            "FADDS  $dst.b,$src1.b,$src2.b\n\t"
            "FADDS  $dst.c,$src1.c,$src2.c\n\t"
            "FADDS  $dst.d,$src1.d,$src2.d" %}

  ins_encode %{
    // Walk the four consecutive S registers of each Q operand.
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ add_float(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor();
    FloatRegister src1b = src1a->successor();
    FloatRegister src2b = src2a->successor();
    __ add_float(dstb, src1b, src2b);
    FloatRegister dstc = dstb->successor();
    FloatRegister src1c = src1b->successor();
    FloatRegister src2c = src2b->successor();
    __ add_float(dstc, src1c, src2c);
    FloatRegister dstd = dstc->successor();
    FloatRegister src1d = src1c->successor();
    FloatRegister src2d = src2c->successor();
    __ add_float(dstd, src1d, src2d);
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}

// Doubles vector add: two scalar FADDDs (no compliant-SIMD variant here).
instruct vadd2D_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FADDD  $dst.a,$src1.a,$src2.a\n\t"
            "FADDD  $dst.b,$src1.b,$src2.b" %}

  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ add_double(dsta, src1a, src2a);
    // Advance past the two S-register halves to the second D register.
    FloatRegister dstb = dsta->successor()->successor();
    FloatRegister src1b = src1a->successor()->successor();
    FloatRegister src2b = src2a->successor()->successor();
    __ add_double(dstb, src1b, src2b);
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}
9894 
9895 
// Bytes vector sub
// 8x byte subtract in a D register (NEON VSUB.I8).
instruct vsub8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  size(4);
  format %{ "VSUB.I8 $dst,$src1,$src2\t! sub packed8B" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// 16x byte subtract in a Q register.
instruct vsub16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  size(4);
  format %{ "VSUB.I8 $dst.Q,$src1.Q,$src2.Q\t! sub packed16B" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// Shorts/Chars vector sub
// 4x halfword subtract in a D register (NEON VSUB.I16).
instruct vsub4S_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  size(4);
  format %{ "VSUB.I16 $dst,$src1,$src2\t! sub packed4S" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// 8x halfword subtract in a Q register.
// NOTE(review): the name says "16S" but the predicate/format are for
// 8 shorts (packed8S) -- the name appears to be a misnomer; left as-is
// in case expand/peephole rules elsewhere reference it.
instruct vsub16S_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  size(4);
  format %{ "VSUB.I16 $dst.Q,$src1.Q,$src2.Q\t! sub packed8S" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// Integers vector sub
// 2x word subtract in a D register (NEON VSUB.I32).
instruct vsub2I_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  size(4);
  format %{ "VSUB.I32 $dst,$src1,$src2\t! sub packed2I" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// 4x word subtract in a Q register.
instruct vsub4I_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  size(4);
  format %{ "VSUB.I32 $dst.Q,$src1.Q,$src2.Q\t! sub packed4I" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}

// Longs vector sub
// 2x doubleword subtract in a Q register (NEON VSUB.I64).
instruct vsub2L_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  size(4);
  format %{ "VSUB.I64 $dst.Q,$src1.Q,$src2.Q\t! sub packed2L" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_64, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
9990 
// Floats vector sub
// NEON path: only used when SIMD FP math is IEEE-compliant on this CPU.
instruct vsub2F_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::simd_math_is_compliant());
  match(Set dst (SubVF src1 src2));
  size(4);
  format %{ "VSUB.F32 $dst,$src1,$src2\t! sub packed2F" %}
  ins_encode %{
    bool quad = false;  // D-register (64-bit) form
    __ vsubF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}
  ins_pipe( faddF_reg_reg ); // FIXME
%}

// VFP fallback: two scalar FSUBS on consecutive S registers.
instruct vsub2F_reg_vfp(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && !VM_Version::simd_math_is_compliant());
  match(Set dst (SubVF src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FSUBS  $dst.a,$src1.a,$src2.a\n\t"
            "FSUBS  $dst.b,$src1.b,$src2.b" %}

  ins_encode %{
    // successor() is the second S-register half of each D operand.
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ sub_float(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor();
    FloatRegister src1b = src1a->successor();
    FloatRegister src2b = src2a->successor();
    __ sub_float(dstb, src1b, src2b);
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}


// NEON path for 4 floats in a Q register.
instruct vsub4F_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::simd_math_is_compliant());
  match(Set dst (SubVF src1 src2));
  size(4);
  format %{ "VSUB.F32 $dst.Q,$src1.Q,$src2.Q\t! sub packed4F" %}
  ins_encode %{
    bool quad = true;  // Q-register (128-bit) form
    __ vsubF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}
  ins_pipe( faddF_reg_reg ); // FIXME
%}

// VFP fallback: four scalar FSUBS on consecutive S registers.
instruct vsub4F_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && !VM_Version::simd_math_is_compliant());
  match(Set dst (SubVF src1 src2));
  size(4*4);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "FSUBS  $dst.a,$src1.a,$src2.a\n\t"
            "FSUBS  $dst.b,$src1.b,$src2.b\n\t"
            "FSUBS  $dst.c,$src1.c,$src2.c\n\t"
            "FSUBS  $dst.d,$src1.d,$src2.d" %}

  ins_encode %{
    // Walk the four consecutive S registers of each Q operand.
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ sub_float(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor();
    FloatRegister src1b = src1a->successor();
    FloatRegister src2b = src2a->successor();
    __ sub_float(dstb, src1b, src2b);
    FloatRegister dstc = dstb->successor();
    FloatRegister src1c = src1b->successor();
    FloatRegister src2c = src2b->successor();
    __ sub_float(dstc, src1c, src2c);
    FloatRegister dstd = dstc->successor();
    FloatRegister src1d = src1c->successor();
    FloatRegister src2d = src2c->successor();
    __ sub_float(dstd, src1d, src2d);
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}

// Doubles vector sub: two scalar FSUBDs (no compliant-SIMD variant here).
instruct vsub2D_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FSUBD  $dst.a,$src1.a,$src2.a\n\t"
            "FSUBD  $dst.b,$src1.b,$src2.b" %}

  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ sub_double(dsta, src1a, src2a);
    // Advance past the two S-register halves to the second D register.
    FloatRegister dstb = dsta->successor()->successor();
    FloatRegister src1b = src1a->successor()->successor();
    FloatRegister src2b = src2a->successor()->successor();
    __ sub_double(dstb, src1b, src2b);
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}
10097 
// Shorts/Chars vector mul
// 4x halfword multiply in a D register (NEON VMUL.I16).
instruct vmul4S_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  size(4);
  format %{ "VMUL.I16 $dst,$src1,$src2\t! mul packed4S" %}
  ins_encode %{
    // Use a named bool for the quad flag (previously a bare literal 0),
    // matching the vaddI/vsubI/vdupI encodings elsewhere in this file.
    bool quad = false;  // D-register (64-bit) form
    __ vmulI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10110 
// 8x halfword multiply in a Q register (NEON VMUL.I16).
instruct vmul8S_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  size(4);
  format %{ "VMUL.I16 $dst.Q,$src1.Q,$src2.Q\t! mul packed8S" %}
  ins_encode %{
    // Use a named bool for the quad flag (previously a bare literal 1),
    // matching the vaddI/vsubI/vdupI encodings elsewhere in this file.
    bool quad = true;  // Q-register (128-bit) form
    __ vmulI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10122 
// Integers vector mul
// 2x word multiply in a D register (NEON VMUL.I32).
instruct vmul2I_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  size(4);
  format %{ "VMUL.I32 $dst,$src1,$src2\t! mul packed2I" %}
  ins_encode %{
    // Use a named bool for the quad flag (previously a bare literal 0),
    // matching the vaddI/vsubI/vdupI encodings elsewhere in this file.
    bool quad = false;  // D-register (64-bit) form
    __ vmulI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10135 
// Multiply packed 4x32-bit integer elements held in 128-bit (Q) vector registers.
instruct vmul4I_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  size(4);
  format %{ "VMUL.I32 $dst.Q,$src1.Q,$src2.Q\t! mul packed4I" %}
  ins_encode %{
    // Trailing 1 selects the quad (Q-register) form of vmulI.
    __ vmulI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, 1);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10147 
10148 // Floats vector mul
// Multiply packed 2 floats with one SIMD VMUL.F32; only selected when the
// predicate says SIMD FP arithmetic is Java-compliant (otherwise the _vfp
// variant below is used instead).
instruct vmul2F_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::simd_math_is_compliant());
  match(Set dst (MulVF src1 src2));
  size(4);
  format %{ "VMUL.F32 $dst,$src1,$src2\t! mul packed2F" %}
  ins_encode %{
    __ vmulF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, 0);
  %}
  ins_pipe( fmulF_reg_reg ); // FIXME
%}
10160 
// VFP fallback for packed 2-float multiply: two scalar FMULS on the two
// consecutive single-precision registers (.a and .b) that make up each vecD.
instruct vmul2F_reg_vfp(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && !VM_Version::simd_math_is_compliant());
  match(Set dst (MulVF src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FMULS  $dst.a,$src1.a,$src2.a\n\t"
            "FMULS  $dst.b,$src1.b,$src2.b" %}
  ins_encode %{
    __ mul_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
    // successor() advances one S register to address the second lane.
    __ mul_float($dst$$FloatRegister->successor(),
             $src1$$FloatRegister->successor(),
             $src2$$FloatRegister->successor());
  %}

  ins_pipe(fmulF_reg_reg); // FIXME
%}
10178 
// Multiply packed 4 floats with one SIMD VMUL.F32 (quad form); only used
// when SIMD FP arithmetic is Java-compliant (see predicate).
instruct vmul4F_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::simd_math_is_compliant());
  match(Set dst (MulVF src1 src2));
  size(4);
  format %{ "VMUL.F32 $dst.Q,$src1.Q,$src2.Q\t! mul packed4F" %}
  ins_encode %{
    __ vmulF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, 1);
  %}
  ins_pipe( fmulF_reg_reg ); // FIXME
%}
10190 
// VFP fallback for packed 4-float multiply: four scalar FMULS, walking the
// four consecutive single-precision registers of each vecX via successor().
instruct vmul4F_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && !VM_Version::simd_math_is_compliant());
  match(Set dst (MulVF src1 src2));
  size(4*4);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "FMULS  $dst.a,$src1.a,$src2.a\n\t"
            "FMULS  $dst.b,$src1.b,$src2.b\n\t"
            "FMULS  $dst.c,$src1.c,$src2.c\n\t"
            "FMULS  $dst.d,$src1.d,$src2.d" %}

  ins_encode %{
    // Lane a.
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ mul_float(dsta, src1a, src2a);
    // Lane b.
    FloatRegister dstb = dsta->successor();
    FloatRegister src1b = src1a->successor();
    FloatRegister src2b = src2a->successor();
    __ mul_float(dstb, src1b, src2b);
    // Lane c.
    FloatRegister dstc = dstb->successor();
    FloatRegister src1c = src1b->successor();
    FloatRegister src2c = src2b->successor();
    __ mul_float(dstc, src1c, src2c);
    // Lane d.
    FloatRegister dstd = dstc->successor();
    FloatRegister src1d = src1c->successor();
    FloatRegister src2d = src2c->successor();
    __ mul_float(dstd, src1d, src2d);
  %}

  ins_pipe(fmulF_reg_reg); // FIXME
%}
10223 
// Packed 2-double multiply via two scalar FMULD. A double register spans two
// S registers, so successor()->successor() advances to the next D lane.
instruct vmul2D_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FMULD  $dst.D.a,$src1.D.a,$src2.D.a\n\t"
            "FMULD  $dst.D.b,$src1.D.b,$src2.D.b" %}
  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ mul_double(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor()->successor();
    FloatRegister src1b = src1a->successor()->successor();
    FloatRegister src2b = src2a->successor()->successor();
    __ mul_double(dstb, src1b, src2b);
  %}

  ins_pipe(fmulD_reg_reg); // FIXME
%}
10245 
10246 
10247 // Floats vector div
// Packed 2-float divide via two scalar FDIVS on the consecutive
// single-precision registers of each vecD (no SIMD divide variant here).
instruct vdiv2F_reg_vfp(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FDIVS  $dst.a,$src1.a,$src2.a\n\t"
            "FDIVS  $dst.b,$src1.b,$src2.b" %}
  ins_encode %{
    __ div_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
    // successor() advances one S register to address the second lane.
    __ div_float($dst$$FloatRegister->successor(),
             $src1$$FloatRegister->successor(),
             $src2$$FloatRegister->successor());
  %}

  ins_pipe(fdivF_reg_reg); // FIXME
%}
10265 
// Packed 4-float divide via four scalar FDIVS, walking the four consecutive
// single-precision registers of each vecX via successor().
instruct vdiv4F_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  size(4*4);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "FDIVS  $dst.a,$src1.a,$src2.a\n\t"
            "FDIVS  $dst.b,$src1.b,$src2.b\n\t"
            "FDIVS  $dst.c,$src1.c,$src2.c\n\t"
            "FDIVS  $dst.d,$src1.d,$src2.d" %}

  ins_encode %{
    // Lane a.
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ div_float(dsta, src1a, src2a);
    // Lane b.
    FloatRegister dstb = dsta->successor();
    FloatRegister src1b = src1a->successor();
    FloatRegister src2b = src2a->successor();
    __ div_float(dstb, src1b, src2b);
    // Lane c.
    FloatRegister dstc = dstb->successor();
    FloatRegister src1c = src1b->successor();
    FloatRegister src2c = src2b->successor();
    __ div_float(dstc, src1c, src2c);
    // Lane d.
    FloatRegister dstd = dstc->successor();
    FloatRegister src1d = src1c->successor();
    FloatRegister src2d = src2c->successor();
    __ div_float(dstd, src1d, src2d);
  %}

  ins_pipe(fdivF_reg_reg); // FIXME
%}
10298 
// Packed 2-double divide via two scalar FDIVD; successor()->successor()
// advances two S registers, i.e. one D lane.
instruct vdiv2D_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FDIVD  $dst.D.a,$src1.D.a,$src2.D.a\n\t"
            "FDIVD  $dst.D.b,$src1.D.b,$src2.D.b" %}
  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ div_double(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor()->successor();
    FloatRegister src1b = src1a->successor()->successor();
    FloatRegister src2b = src2a->successor()->successor();
    __ div_double(dstb, src1b, src2b);
  %}

  ins_pipe(fdivD_reg_reg); // FIXME
%}
10320 
10321 // --------------------------------- NEG --------------------------------------
10322 
// Negate packed 8 signed bytes (D-register form). No match rule — only
// DEF/USE effects — so this instruct is reachable only through expand rules.
instruct vneg8B_reg(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  effect(DEF dst, USE src);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{ "VNEG.S8 $dst.D,$src.D\t! neg packed8B" %}
  ins_encode %{
    bool quad = false;
    __ vnegI($dst$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10336 
// Negate packed 16 signed bytes (Q-register form). No match rule — only
// DEF/USE effects — so this instruct is reachable only through expand rules.
// Cleanup: removed the unused local `bool _float = false;` and changed the
// disassembly annotation "neg0" to "neg" to match vneg8B_reg above.
instruct vneg16B_reg(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  effect(DEF dst, USE src);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{ "VNEG.S8 $dst.Q,$src.Q\t! neg packed16B" %}
  ins_encode %{
    bool quad = true;
    __ vnegI($dst$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10351 
10352 // ------------------------------ Shift ---------------------------------------
10353 
// Materialize a left-shift count vector (D form): replicate the scalar count
// into every byte lane via the Repl8B_reg_simd expansion.
instruct vslcntD(vecD dst, iRegI cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (LShiftCntV cnt));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    Repl8B_reg_simd(dst, cnt);
  %}
%}
10363 
// Materialize a left-shift count vector (Q form): replicate the scalar count
// into every byte lane via the Repl16B_reg expansion.
instruct vslcntX(vecX dst, iRegI cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (LShiftCntV cnt));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    Repl16B_reg(dst, cnt);
  %}
%}
10373 
10374 // Low bits of vector "shift" elements are used, so it
10375 // doesn't matter if we treat it as ints or bytes here.
// Materialize a right-shift count vector (D form): replicate the count into
// every byte lane, then negate it so a later VSHL-based shift moves right.
instruct vsrcntD(vecD dst, iRegI cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (RShiftCntV cnt));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "VDUP.8 $dst.D,$cnt\n\t"
            "VNEG.S8 $dst.D,$dst.D\t! neg packed8B" %}
  ins_encode %{
    bool quad = false;
    __ vdupI($dst$$FloatRegister, $cnt$$Register,
             MacroAssembler::VELEM_SIZE_8, quad);
    __ vnegI($dst$$FloatRegister, $dst$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10393 
// Materialize a right-shift count vector (Q form): replicate the count into
// every byte lane, then negate it so a later VSHL-based shift moves right.
instruct vsrcntX(vecX dst, iRegI cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (RShiftCntV cnt));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME
  format %{ "VDUP.8 $dst.Q,$cnt\n\t"
            "VNEG.S8 $dst.Q,$dst.Q\t! neg packed16B" %}
  ins_encode %{
    bool quad = true;
    __ vdupI($dst$$FloatRegister, $cnt$$Register,
             MacroAssembler::VELEM_SIZE_8, quad);
    __ vnegI($dst$$FloatRegister, $dst$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10410 
10411 // Byte vector logical left/right shift based on sign
// Variable shift of packed 8 bytes, direction chosen by the sign of each
// count lane (unsigned VSHL). Expand-only (effects, no match rule).
instruct vsh8B_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 8);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U8 $dst.D,$src.D,$shift.D\t! logical left/right shift packed8B"
  %}
  ins_encode %{
    bool quad = false;
    // Note the argument order: the shift-count register precedes the source.
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10427 
// Variable shift of packed 16 bytes, direction chosen by the sign of each
// count lane (unsigned VSHL, quad form). Expand-only (effects, no match rule).
instruct vsh16B_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U8 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed16B"
  %}
  ins_encode %{
    bool quad = true;
    // Note the argument order: the shift-count register precedes the source.
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10443 
10444 // Shorts/Char vector logical left/right shift based on sign
// Variable shift of packed 4 shorts/chars, direction chosen by the sign of
// each count lane (unsigned VSHL). Expand-only (effects, no match rule).
instruct vsh4S_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U16 $dst.D,$src.D,$shift.D\t! logical left/right shift packed4S"
  %}
  ins_encode %{
    bool quad = false;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10460 
// Variable shift of packed 8 shorts/chars, direction chosen by the sign of
// each count lane (unsigned VSHL, quad form). Expand-only.
instruct vsh8S_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U16 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed8S"
  %}
  ins_encode %{
    bool quad = true;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10476 
10477 // Integers vector logical left/right shift based on sign
// Variable shift of packed 2 ints, direction chosen by the sign of each
// count lane (unsigned VSHL). Expand-only (effects, no match rule).
instruct vsh2I_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U32 $dst.D,$src.D,$shift.D\t! logical left/right shift packed2I"
  %}
  ins_encode %{
    bool quad = false;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10493 
// Variable shift of packed 4 ints, direction chosen by the sign of each
// count lane (unsigned VSHL, quad form). Expand-only.
instruct vsh4I_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U32 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed4I"
  %}
  ins_encode %{
    bool quad = true;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10509 
10510 // Longs vector logical left/right shift based on sign
// Variable shift of packed 2 longs, direction chosen by the sign of each
// count lane (unsigned VSHL, quad form). Expand-only.
instruct vsh2L_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U64 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed2L"
  %}
  ins_encode %{
    bool quad = true;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_64, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10526 
10527 // ------------------------------ LeftShift -----------------------------------
10528 
10529 // Byte vector left shift
// Left shift of packed 8 bytes by a vector count; expands to the shared
// variable-shift instruct vsh8B_reg.
instruct vsl8B_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh8B_reg(dst, src, shift);
  %}
%}
10539 
// Left shift of packed 16 bytes by a vector count; expands to the shared
// variable-shift instruct vsh16B_reg.
instruct vsl16B_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh16B_reg(dst, src, shift);
  %}
%}
10549 
// Left shift of packed 8 bytes by an immediate count (element size 8 bits).
instruct vsl8B_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I8 $dst.D,$src.D,$shift\t! logical left shift packed8B"
  %}
  ins_encode %{
    bool quad = false;
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10565 
// Left shift of packed 16 bytes by an immediate count (quad form).
instruct vsl16B_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I8 $dst.Q,$src.Q,$shift\t! logical left shift packed16B"
  %}
  ins_encode %{
    bool quad = true;
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10581 
10582 // Shorts/Chars vector logical left/right shift
// Left or logical-right shift of packed 4 shorts by a vector count; both map
// onto vsh4S_reg, which shifts right when the count lanes are negative.
instruct vsl4S_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (URShiftVS src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh4S_reg(dst, src, shift);
  %}
%}
10593 
// Left or logical-right shift of packed 8 shorts by a vector count; both map
// onto vsh8S_reg, which shifts right when the count lanes are negative.
instruct vsl8S_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (URShiftVS src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh8S_reg(dst, src, shift);
  %}
%}
10604 
// Left shift of packed 4 shorts by an immediate count (element size 16 bits).
instruct vsl4S_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I16 $dst.D,$src.D,$shift\t! logical left shift packed4S"
  %}
  ins_encode %{
    bool quad = false;
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10620 
// Left shift of packed 8 shorts by an immediate count (quad form).
instruct vsl8S_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I16 $dst.Q,$src.Q,$shift\t! logical left shift packed8S"
  %}
  ins_encode %{
    bool quad = true;
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10636 
10637 // Integers vector logical left/right shift
// Left or logical-right shift of packed 2 ints by a vector count; both map
// onto vsh2I_reg, which shifts right when the count lanes are negative.
instruct vsl2I_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd());
  match(Set dst (LShiftVI src shift));
  match(Set dst (URShiftVI src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh2I_reg(dst, src, shift);
  %}
%}
10648 
// Left or logical-right shift of packed 4 ints by a vector count; both map
// onto vsh4I_reg, which shifts right when the count lanes are negative.
instruct vsl4I_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd());
  match(Set dst (LShiftVI src shift));
  match(Set dst (URShiftVI src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh4I_reg(dst, src, shift);
  %}
%}
10659 
// Left shift of packed 2 ints by an immediate count (element size 32 bits).
instruct vsl2I_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd());
  match(Set dst (LShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I32 $dst.D,$src.D,$shift\t! logical left shift packed2I"
  %}
  ins_encode %{
    bool quad = false;
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10675 
// Left shift of packed 4 ints by an immediate count (quad form).
instruct vsl4I_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd());
  match(Set dst (LShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I32 $dst.Q,$src.Q,$shift\t! logical left shift packed4I"
  %}
  ins_encode %{
    bool quad = true;
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10691 
10692 // Longs vector logical left/right shift
// Left or logical-right shift of packed 2 longs by a vector count; both map
// onto vsh2L_reg, which shifts right when the count lanes are negative.
instruct vsl2L_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (URShiftVL src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh2L_reg(dst, src, shift);
  %}
%}
10703 
// Left shift of packed 2 longs by an immediate count (element size 64 bits).
instruct vsl2L_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I64 $dst.Q,$src.Q,$shift\t! logical left shift packed2L"
  %}
  ins_encode %{
    bool quad = true;
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10719 
10720 // ----------------------- LogicalRightShift -----------------------------------
10721 
// Bytes/Shorts vector logical right shift would produce an incorrect Java
// result for negative data, because Java converts byte/short values to int
// with sign extension before shifting.
10725 
10726 // Chars vector logical right shift
// Logical (unsigned) right shift of packed 4 chars by an immediate count.
instruct vsrl4S_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.U16 $dst.D,$src.D,$shift\t! logical right shift packed4S"
  %}
  ins_encode %{
    bool quad = false;
    __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10742 
// Logical (unsigned) right shift of packed 8 chars by an immediate count.
instruct vsrl8S_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.U16 $dst.Q,$src.Q,$shift\t! logical right shift packed8S"
  %}
  ins_encode %{
    bool quad = true;
    __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10758 
10759 // Integers vector logical right shift
// Logical (unsigned) right shift of packed 2 ints by an immediate count.
instruct vsrl2I_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd());
  match(Set dst (URShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.U32 $dst.D,$src.D,$shift\t! logical right shift packed2I"
  %}
  ins_encode %{
    bool quad = false;
    __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10775 
// Logical (unsigned) right shift of packed 4 ints by an immediate count.
instruct vsrl4I_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd());
  match(Set dst (URShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.U32 $dst.Q,$src.Q,$shift\t! logical right shift packed4I"
  %}
  ins_encode %{
    bool quad = true;
    __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10791 
10792 // Longs vector logical right shift
// Logical (unsigned) right shift of packed 2 longs by an immediate count.
instruct vsrl2L_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.U64 $dst.Q,$src.Q,$shift\t! logical right shift packed2L"
  %}
  ins_encode %{
    bool quad = true;
    __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10808 
10809 // ------------------- ArithmeticRightShift -----------------------------------
10810 
10811 // Bytes vector arithmetic left/right shift based on sign
// Variable arithmetic shift of packed 8 bytes, direction chosen by the sign
// of each count lane (signed VSHL). Expand-only (effects, no match rule).
instruct vsha8B_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 8);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S8 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed8B"
  %}
  ins_encode %{
    bool quad = false;
    // Note the argument order: the shift-count register precedes the source.
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10827 
// Variable arithmetic shift of packed 16 bytes, direction chosen by the sign
// of each count lane (signed VSHL, quad form). Expand-only.
instruct vsha16B_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S8 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed16B"
  %}
  ins_encode %{
    bool quad = true;
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10843 
10844 // Shorts vector arithmetic left/right shift based on sign
// Variable arithmetic shift of packed 4 shorts, direction chosen by the sign
// of each count lane (signed VSHL). Expand-only.
instruct vsha4S_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S16 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed4S"
  %}
  ins_encode %{
    bool quad = false;
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10860 
// Variable arithmetic shift of packed 8 shorts, direction chosen by the sign
// of each count lane (signed VSHL, quad form). Expand-only.
instruct vsha8S_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S16 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed8S"
  %}
  ins_encode %{
    bool quad = true;
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10876 
10877 // Integers vector arithmetic left/right shift based on sign
// Variable arithmetic shift of packed 2 ints, direction chosen by the sign
// of each count lane (signed VSHL). Expand-only.
instruct vsha2I_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S32 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed2I"
  %}
  ins_encode %{
    bool quad = false;
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10893 
// Variable arithmetic shift of packed 4 ints, direction chosen by the sign
// of each count lane (signed VSHL, quad form). Expand-only.
instruct vsha4I_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S32 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed4I"
  %}
  ins_encode %{
    bool quad = true;
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10909 
10910 // Longs vector arithmetic left/right shift based on sign
// Variable arithmetic shift of packed 2 longs, direction chosen by the sign
// of each count lane (signed VSHL, quad form). Expand-only.
instruct vsha2L_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S64 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed2L"
  %}
  ins_encode %{
    bool quad = true;
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_64, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10926 
10927 // Byte vector arithmetic right shift
10928 
// Arithmetic right shift of packed 8 bytes by a vector count; expands to the
// shared signed variable-shift instruct vsha8B_reg.
instruct vsra8B_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha8B_reg(dst, src, shift);
  %}
%}
10938 
// Arithmetic right shift of packed 16 bytes by a vector count; expands to
// the shared signed variable-shift instruct vsha16B_reg.
// NOTE(review): the name says "vsrl" (logical) but this matches RShiftVB
// (arithmetic) — likely a misnomer for "vsra16B_reg"; kept as-is in case the
// name is referenced elsewhere.
instruct vsrl16B_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha16B_reg(dst, src, shift);
  %}
%}
10948 
// Arithmetic (signed) right shift of packed 8 bytes by an immediate count.
// Fix: the format annotation previously said "logical right shift", but this
// instruct matches RShiftVB and emits VSHR.S8 (signed, i.e. arithmetic).
// NOTE(review): the "vsrl" name is also a misnomer; kept in case it is
// referenced elsewhere.
instruct vsrl8B_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.S8 $dst.D,$src.D,$shift\t! arithmetic right shift packed8B"
  %}
  ins_encode %{
    bool quad = false;
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10964 
// Arithmetic (signed) right shift of packed 16 bytes by an immediate count.
// Fix: the format annotation previously said "logical right shift", but this
// instruct matches RShiftVB and emits VSHR.S8 (signed, i.e. arithmetic).
instruct vsrl16B_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.S8 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed16B"
  %}
  ins_encode %{
    bool quad = true;
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
10980 
10981 // Shorts vector arithmetic right shift
// Arithmetic right shift of packed 4 shorts by a vector count; expands to
// the shared signed variable-shift instruct vsha4S_reg.
instruct vsra4S_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha4S_reg(dst, src, shift);
  %}
%}
10991 
// Arithmetic right shift of packed 8 shorts by a vector count; expands to
// the shared signed variable-shift instruct vsha8S_reg.
instruct vsra8S_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha8S_reg(dst, src, shift);
  %}
%}
11001 
// Arithmetic (signed) right shift of packed 4 shorts by an immediate count.
// Fix: the format annotation previously said "logical right shift", but this
// instruct matches RShiftVS and emits VSHR.S16 (signed, i.e. arithmetic).
instruct vsra4S_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.S16 $dst.D,$src.D,$shift\t! arithmetic right shift packed4S"
  %}
  ins_encode %{
    bool quad = false;
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
11017 
// Arithmetic (signed) right shift of packed 8S by an immediate count.
// Matches RShiftVS and emits VSHR.S16 (signed shift), so the format text
// is "arithmetic", not "logical", right shift.
instruct vsra8S_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.S16 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed8S"
  %}
  ins_encode %{
    bool quad = true;
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
11033 
11034 // Integers vector arithmetic right shift
// Arithmetic right shift of packed 2I (vecD) by a shift count held in a
// vector register; simply expands to vsha2I_reg, which emits the instruction.
instruct vsra2I_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha2I_reg(dst, src, shift);
  %}
%}
11044 
// Arithmetic right shift of packed 4I (vecX) by a shift count held in a
// vector register; simply expands to vsha4I_reg, which emits the instruction.
instruct vsra4I_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha4I_reg(dst, src, shift);
  %}
%}
11054 
// Arithmetic (signed) right shift of packed 2I by an immediate count.
// Matches RShiftVI and emits VSHR.S32 (signed shift), so the format text
// is "arithmetic", not "logical", right shift.
instruct vsra2I_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.S32 $dst.D,$src.D,$shift\t! arithmetic right shift packed2I"
  %}
  ins_encode %{
    bool quad = false;
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
11070 
// Arithmetic (signed) right shift of packed 4I by an immediate count.
// Matches RShiftVI and emits VSHR.S32 (signed shift), so the format text
// is "arithmetic", not "logical", right shift.
instruct vsra4I_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.S32 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed4I"
  %}
  ins_encode %{
    bool quad = true;
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
11086 
11087 // Longs vector arithmetic right shift
// Arithmetic right shift of packed 2L (vecX) by a shift count held in a
// vector register; simply expands to vsha2L_reg, which emits the instruction.
instruct vsra2L_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha2L_reg(dst, src, shift);
  %}
%}
11097 
// Arithmetic (signed) right shift of packed 2L by an immediate count.
// Matches RShiftVL and emits VSHR.S64 (signed shift), so the format text
// is "arithmetic", not "logical", right shift.
instruct vsra2L_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.S64 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed2L"
  %}
  ins_encode %{
    bool quad = true;
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
11113 
11114 // --------------------------------- AND --------------------------------------
11115 
// Bitwise AND of two 8-byte vectors (double-word NEON registers).
instruct vandD(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  format %{ "VAND    $dst.D,$src1.D,$src2.D\t! and vectors (8 bytes)" %}
  ins_encode %{
    // quad == false selects the 64-bit (D-register) form of VAND.
    bool quad = false;
    __ vandI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
11127 
// Bitwise AND of two 16-byte vectors (quad-word NEON registers).
instruct vandX(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  format %{ "VAND    $dst.Q,$src1.Q,$src2.Q\t! and vectors (16 bytes)" %}
  ins_encode %{
    // quad == true selects the 128-bit (Q-register) form of VAND.
    bool quad = true;
    __ vandI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
11139 
11140 // --------------------------------- OR ---------------------------------------
11141 
// Bitwise OR of two 8-byte vectors (double-word NEON registers).
// Fixed the format comment: this is an OR, not an AND.
instruct vorD(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  format %{ "VOR     $dst.D,$src1.D,$src2.D\t! or vectors (8 bytes)" %}
  ins_encode %{
    bool quad = false;
    __ vorI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
            quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
11153 
// Bitwise OR of two 16-byte vectors (quad-word NEON registers).
// Fixed the format comment: this is an OR, not an AND.
instruct vorX(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  format %{ "VOR     $dst.Q,$src1.Q,$src2.Q\t! or vectors (16 bytes)" %}
  ins_encode %{
    bool quad = true;
    __ vorI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
            quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
11165 
11166 // --------------------------------- XOR --------------------------------------
11167 
// Bitwise XOR of two 8-byte vectors (double-word NEON registers).
// Fixed the format comment: this is an XOR, not an AND.
instruct vxorD(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  format %{ "VXOR    $dst.D,$src1.D,$src2.D\t! xor vectors (8 bytes)" %}
  ins_encode %{
    bool quad = false;
    __ vxorI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
11179 
// Bitwise XOR of two 16-byte vectors (quad-word NEON registers).
// Fixed the format comment: this is an XOR, not an AND.
instruct vxorX(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  format %{ "VXOR    $dst.Q,$src1.Q,$src2.Q\t! xor vectors (16 bytes)" %}
  ins_encode %{
    bool quad = true;
    __ vxorI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
11191 
11192 
11193 //----------PEEPHOLE RULES-----------------------------------------------------
11194 // These must follow all instruction definitions as they use the names
11195 // defined in the instructions definitions.
11196 //
11197 // peepmatch ( root_instr_name [preceding_instruction]* );
11198 //
11199 // peepconstraint %{
11200 // (instruction_number.operand_name relational_op instruction_number.operand_name
11201 //  [, ...] );
11202 // // instruction numbers are zero-based using left to right order in peepmatch
11203 //
11204 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
11205 // // provide an instruction_number.operand_name for each operand that appears
11206 // // in the replacement instruction's match rule
11207 //
11208 // ---------VM FLAGS---------------------------------------------------------
11209 //
11210 // All peephole optimizations can be turned off using -XX:-OptoPeephole
11211 //
11212 // Each peephole rule is given an identifying number starting with zero and
11213 // increasing by one in the order seen by the parser.  An individual peephole
11214 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
11215 // on the command-line.
11216 //
11217 // ---------CURRENT LIMITATIONS----------------------------------------------
11218 //
11219 // Only match adjacent instructions in same basic block
11220 // Only equality constraints
11221 // Only constraints between operands, not (0.dest_reg == EAX_enc)
11222 // Only one replacement instruction
11223 //
11224 // ---------EXAMPLE----------------------------------------------------------
11225 //
11226 // // pertinent parts of existing instructions in architecture description
11227 // instruct movI(eRegI dst, eRegI src) %{
11228 //   match(Set dst (CopyI src));
11229 // %}
11230 //
11231 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
11232 //   match(Set dst (AddI dst src));
11233 //   effect(KILL cr);
11234 // %}
11235 //
11236 // // Change (inc mov) to lea
11237 // peephole %{
//   // increment preceded by register-register move
11239 //   peepmatch ( incI_eReg movI );
11240 //   // require that the destination register of the increment
11241 //   // match the destination register of the move
11242 //   peepconstraint ( 0.dst == 1.dst );
11243 //   // construct a replacement instruction that sets
11244 //   // the destination to ( move's source register + one )
11245 //   peepreplace ( incI_eReg_immI1( 0.dst 1.src 0.src ) );
11246 // %}
11247 //
11248 
11249 // // Change load of spilled value to only a spill
11250 // instruct storeI(memory mem, eRegI src) %{
11251 //   match(Set mem (StoreI mem src));
11252 // %}
11253 //
11254 // instruct loadI(eRegI dst, memory mem) %{
11255 //   match(Set dst (LoadI mem));
11256 // %}
11257 //
11258 // peephole %{
11259 //   peepmatch ( loadI storeI );
11260 //   peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
11261 //   peepreplace ( storeI( 1.mem 1.mem 1.src ) );
11262 // %}
11263 
11264 //----------SMARTSPILL RULES---------------------------------------------------
11265 // These must follow all instruction definitions as they use the names
11266 // defined in the instructions definitions.
11267 //
11268 // ARM will probably not have any of these rules due to RISC instruction set.
11269 
11270 //----------PIPELINE-----------------------------------------------------------
11271 // Rules which define the behavior of the target architectures pipeline.