1 //
   2 // Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4 //
   5 // This code is free software; you can redistribute it and/or modify it
   6 // under the terms of the GNU General Public License version 2 only, as
   7 // published by the Free Software Foundation.
   8 //
   9 // This code is distributed in the hope that it will be useful, but WITHOUT
  10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12 // version 2 for more details (a copy is included in the LICENSE file that
  13 // accompanied this code).
  14 //
  15 // You should have received a copy of the GNU General Public License version
  16 // 2 along with this work; if not, write to the Free Software Foundation,
  17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18 //
  19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20 // or visit www.oracle.com if you need additional information or have any
  21 // questions.
  22 //
  23 //
  24 
  25 // X86 Architecture Description File
  26 
  27 //----------REGISTER DEFINITION BLOCK------------------------------------------
  28 // This information is used by the matcher and the register allocator to
  29 // describe individual registers and classes of registers within the target
  30 // architecture.
  31 
  32 register %{
  33 //----------Architecture Description Register Definitions----------------------
  34 // General Registers
  35 // "reg_def"  name ( register save type, C convention save type,
  36 //                   ideal register type, encoding );
  37 // Register Save Types:
  38 //
  39 // NS  = No-Save:       The register allocator assumes that these registers
  40 //                      can be used without saving upon entry to the method, &
  41 //                      that they do not need to be saved at call sites.
  42 //
  43 // SOC = Save-On-Call:  The register allocator assumes that these registers
  44 //                      can be used without saving upon entry to the method,
  45 //                      but that they must be saved at call sites.
  46 //
  47 // SOE = Save-On-Entry: The register allocator assumes that these registers
  48 //                      must be saved before using them upon entry to the
  49 //                      method, but they do not need to be saved at call
  50 //                      sites.
  51 //
  52 // AS  = Always-Save:   The register allocator assumes that these registers
  53 //                      must be saved before using them upon entry to the
  54 //                      method, & that they must be saved at call sites.
  55 //
  56 // Ideal Register Type is used to determine how to save & restore a
  57 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  58 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  59 //
  60 // The encoding number is the actual bit-pattern placed into the opcodes.
  61 
  62 // General Registers
  63 // Previously EBX, ESI, and EDI were set as save-on-entry for Java code.
  64 // SOE was then turned off in Java code due to frequent use of uncommon traps.
  65 // Now that the allocator is better, ESI and EDI are SOE registers again.
  66 
  67 reg_def EBX(SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
  68 reg_def ECX(SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
  69 reg_def ESI(SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
  70 reg_def EDI(SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
  71 // Now that adapter frames are gone, EBP is always saved and restored by the prolog/epilog code.
  72 reg_def EBP(NS, SOE, Op_RegI, 5, rbp->as_VMReg());
  73 reg_def EDX(SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
  74 reg_def EAX(SOC, SOC, Op_RegI, 0, rax->as_VMReg());
  75 reg_def ESP( NS,  NS, Op_RegI, 4, rsp->as_VMReg());
  76 
  77 // Float registers.  We treat TOS/FPR0 specially.  It is invisible to the
  78 // allocator, and only shows up in the encodings.
  79 reg_def FPR0L( SOC, SOC, Op_RegF, 0, VMRegImpl::Bad());
  80 reg_def FPR0H( SOC, SOC, Op_RegF, 0, VMRegImpl::Bad());
  81 // OK, so here's the trick: FPR1 is really st(0), except in the midst
  82 // of emitting assembly for a machnode. During emission the FPU stack
  83 // is pushed, making FPR1 == st(1) temporarily. However, at any safepoint
  84 // the stack will not have this element, so FPR1 == st(0) from the
  85 // oopMap viewpoint. This same weirdness with numbering forces the
  86 // instruction encoding to play games with the register encode to
  87 // correct for this 0/1 issue. See MachSpillCopyNode::implementation,
  88 // where it does flt->flt moves, for an example.
  89 //
  90 reg_def FPR1L( SOC, SOC, Op_RegF, 1, as_FloatRegister(0)->as_VMReg());
  91 reg_def FPR1H( SOC, SOC, Op_RegF, 1, as_FloatRegister(0)->as_VMReg()->next());
  92 reg_def FPR2L( SOC, SOC, Op_RegF, 2, as_FloatRegister(1)->as_VMReg());
  93 reg_def FPR2H( SOC, SOC, Op_RegF, 2, as_FloatRegister(1)->as_VMReg()->next());
  94 reg_def FPR3L( SOC, SOC, Op_RegF, 3, as_FloatRegister(2)->as_VMReg());
  95 reg_def FPR3H( SOC, SOC, Op_RegF, 3, as_FloatRegister(2)->as_VMReg()->next());
  96 reg_def FPR4L( SOC, SOC, Op_RegF, 4, as_FloatRegister(3)->as_VMReg());
  97 reg_def FPR4H( SOC, SOC, Op_RegF, 4, as_FloatRegister(3)->as_VMReg()->next());
  98 reg_def FPR5L( SOC, SOC, Op_RegF, 5, as_FloatRegister(4)->as_VMReg());
  99 reg_def FPR5H( SOC, SOC, Op_RegF, 5, as_FloatRegister(4)->as_VMReg()->next());
 100 reg_def FPR6L( SOC, SOC, Op_RegF, 6, as_FloatRegister(5)->as_VMReg());
 101 reg_def FPR6H( SOC, SOC, Op_RegF, 6, as_FloatRegister(5)->as_VMReg()->next());
 102 reg_def FPR7L( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg());
 103 reg_def FPR7H( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next());
 104 
 105 // Specify priority of register selection within phases of register
 106 // allocation.  Highest priority is first.  A useful heuristic is to
 107 // give registers a low priority when they are required by machine
 108 // instructions, like EAX and EDX.  Registers which are used as
 109 // pairs must fall on an even boundary (witness the FPR#L's in this list).
 110 // For the Intel integer registers, the equivalent Long pairs are
 111 // EDX:EAX, EBX:ECX, and EDI:EBP.
 112 alloc_class chunk0( ECX,   EBX,   EBP,   EDI,   EAX,   EDX,   ESI, ESP,
 113                     FPR0L, FPR0H, FPR1L, FPR1H, FPR2L, FPR2H,
 114                     FPR3L, FPR3H, FPR4L, FPR4H, FPR5L, FPR5H,
 115                     FPR6L, FPR6H, FPR7L, FPR7H );
 116 
 117 
 118 //----------Architecture Description Register Classes--------------------------
 119 // Several register classes are automatically defined based upon information in
 120 // this architecture description.
 121 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 122 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 123 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 124 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 125 //
 126 // Class for all registers
 127 reg_class any_reg(EAX, EDX, EBP, EDI, ESI, ECX, EBX, ESP);
 128 // Class for general registers
 129 reg_class int_reg(EAX, EDX, EBP, EDI, ESI, ECX, EBX);
 130 // Class for general registers which may be used for implicit null checks on win95.
 131 // Also safe for use by tailjump. We don't want to allocate in EBP.
 132 reg_class int_reg_no_rbp(EAX, EDX, EDI, ESI, ECX, EBX);
 133 // Class of "X" registers
 134 reg_class int_x_reg(EBX, ECX, EDX, EAX);
 135 // Class of registers that can appear in an address with no offset.
 136 // EBP and ESP require an extra instruction byte for zero offset.
 137 // Used in fast-unlock
 138 reg_class p_reg(EDX, EDI, ESI, EBX);
 139 // Class for general registers not including ECX
 140 reg_class ncx_reg(EAX, EDX, EBP, EDI, ESI, EBX);
 141 // Class for general registers not including EAX
 142 reg_class nax_reg(EDX, EDI, ESI, ECX, EBX);
 143 // Class for general registers not including EAX or EBX.
 144 reg_class nabx_reg(EDX, EDI, ESI, ECX, EBP);
 145 // Class of EAX (for multiply and divide operations)
 146 reg_class eax_reg(EAX);
 147 // Class of EBX (for atomic add)
 148 reg_class ebx_reg(EBX);
 149 // Class of ECX (for shift and JCXZ operations and cmpLTMask)
 150 reg_class ecx_reg(ECX);
 151 // Class of EDX (for multiply and divide operations)
 152 reg_class edx_reg(EDX);
 153 // Class of EDI (for synchronization)
 154 reg_class edi_reg(EDI);
 155 // Class of ESI (for synchronization)
 156 reg_class esi_reg(ESI);
 157 // Singleton class for the interpreter's frame pointer
 158 reg_class ebp_reg(EBP);
 159 // Singleton class for stack pointer
 160 reg_class sp_reg(ESP);
 161 // Singleton class for instruction pointer
 162 // reg_class ip_reg(EIP);
 163 // Class of integer register pairs
 164 reg_class long_reg( EAX,EDX, ECX,EBX, EBP,EDI );
 165 // Classes of integer register pairs that align with the calling convention
 166 reg_class eadx_reg( EAX,EDX );
 167 reg_class ebcx_reg( ECX,EBX );
 168 // Not AX or DX, used in divides
 169 reg_class nadx_reg( EBX,ECX,ESI,EDI,EBP );
 170 
 171 // Floating point registers.  Notice FPR0 is not a choice.
 172 // FPR0 is never allocated; we use clever encodings to fake
 173 // 2-address instructions out of Intel's FP stack.
 174 reg_class fp_flt_reg( FPR1L,FPR2L,FPR3L,FPR4L,FPR5L,FPR6L,FPR7L );
 175 
 176 reg_class fp_dbl_reg( FPR1L,FPR1H, FPR2L,FPR2H, FPR3L,FPR3H,
 177                       FPR4L,FPR4H, FPR5L,FPR5H, FPR6L,FPR6H,
 178                       FPR7L,FPR7H );
 179 
 180 reg_class fp_flt_reg0( FPR1L );
 181 reg_class fp_dbl_reg0( FPR1L,FPR1H );
 182 reg_class fp_dbl_reg1( FPR2L,FPR2H );
 183 reg_class fp_dbl_notreg0( FPR2L,FPR2H, FPR3L,FPR3H, FPR4L,FPR4H,
 184                           FPR5L,FPR5H, FPR6L,FPR6H, FPR7L,FPR7H );
 185 
 186 %}
 187 
 188 
 189 //----------SOURCE BLOCK-------------------------------------------------------
 190 // This is a block of C++ code which provides values, functions, and
 191 // definitions necessary in the rest of the architecture description
 192 source_hpp %{
 193 // Must be visible to the DFA in dfa_x86_32.cpp
 194 extern bool is_operand_hi32_zero(Node* n);
 195 %}
 196 
 197 source %{
 198 #define   RELOC_IMM32    Assembler::imm_operand
 199 #define   RELOC_DISP32   Assembler::disp32_operand
 200 
 201 #define __ _masm.
 202 
 203 // How to find the high register of a Long pair, given the low register
 204 #define   HIGH_FROM_LOW(x) ((x)+2)
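     // For example, with the hardware encodings above this maps EAX (0) to
     // EDX (2), ECX (1) to EBX (3) and EBP (5) to EDI (7), matching the long
     // pairs EDX:EAX, EBX:ECX and EDI:EBP noted in the allocation-class
     // comment in the register block.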
 205 
 206 // These masks are used to provide 128-bit aligned bitmasks to the XMM
 207 // instructions, to allow sign-masking or sign-bit flipping.  They allow
 208 // fast versions of NegF/NegD and AbsF/AbsD.
 209 
 210 // Note: 'double' and 'long long' have 32-bit alignment on x86.
 211 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
 212   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
 213   // for the 128-bit operands of SSE instructions.
 214   jlong *operand = (jlong*)(((uintptr_t)adr)&((uintptr_t)(~0xF)));
 215   // Store the value to the 128-bit operand.
 216   operand[0] = lo;
 217   operand[1] = hi;
 218   return operand;
 219 }
 220 
 221 // Buffer for 128-bit masks used by SSE instructions.
 222 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
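     // The extra 128 bits let double_quadword() round each &fp_signmask_pool[i*2]
     // address down to a 16-byte boundary without falling off the front of the
     // array; the first pair of slots is sacrificial alignment padding.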
 223 
 224 // Static initialization during VM startup.
 225 static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
 226 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
 227 static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
 228 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
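     // For example (illustrative use, nothing is emitted here): AbsF can be done
     // by ANDing a float with the 128-bit mask at float_signmask_pool (clearing
     // the sign bits) and NegF by XORing with float_signflip_pool (flipping them);
     // AbsD/NegD use the corresponding double_* masks with the packed-double
     // forms of the same instructions.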
 229 
 230 // Offset hacking within calls.
 231 static int pre_call_resets_size() {
 232   int size = 0;
 233   Compile* C = Compile::current();
 234   if (C->in_24_bit_fp_mode()) {
 235     size += 6; // fldcw
 236   }
 237   if (C->max_vector_size() > 16) {
 238     size += 3; // vzeroupper
 239   }
 240   return size;
 241 }
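     // For example, a method compiled in 24-bit FP mode whose code also uses
     // wide (32-byte) vectors needs 6 + 3 = 9 bytes of reset code ahead of each
     // call, and the ret_addr_offset() values below grow by the same amount.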
 242 
 243 static int preserve_SP_size() {
 244   return 2;  // op, rm(reg/reg)
 245 }
 246 
 247 // !!!!! Special hack to get all types of calls to specify the byte offset
 248 //       from the start of the call to the point where the return address
 249 //       will point.
 250 int MachCallStaticJavaNode::ret_addr_offset() {
 251   int offset = 5 + pre_call_resets_size();  // 5 bytes from start of call to where return address points
 252   if (_method_handle_invoke)
 253     offset += preserve_SP_size();
 254   return offset;
 255 }
 256 
 257 int MachCallDynamicJavaNode::ret_addr_offset() {
 258   return 10 + pre_call_resets_size();  // 10 bytes from start of call to where return address points
 259 }
 260 
 261 static int sizeof_FFree_Float_Stack_All = -1;
 262 
 263 int MachCallRuntimeNode::ret_addr_offset() {
 264   assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
 265   return sizeof_FFree_Float_Stack_All + 5 + pre_call_resets_size();
 266 }
 267 
 268 // Indicate if the safepoint node needs the polling page as an input.
 269 // Since x86 does have absolute addressing, it doesn't.
 270 bool SafePointNode::needs_polling_address_input() {
 271   return false;
 272 }
 273 
 274 //
 275 // Compute padding required for nodes which need alignment
 276 //
 277 
 278 // The address of the call instruction needs to be 4-byte aligned to
 279 // ensure that it does not span a cache line so that it can be patched.
 280 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
 281   current_offset += pre_call_resets_size();  // skip fldcw, if any
 282   current_offset += 1;      // skip call opcode byte
 283   return round_to(current_offset, alignment_required()) - current_offset;
 284 }
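     // Worked example: if, after adding the reset code and the call opcode byte,
     // the displacement would start at offset 7 and alignment_required() is 4,
     // then round_to(7, 4) - 7 == 1 byte of padding is emitted ahead of the call.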
 285 
 286 // The address of the call instruction needs to be 4-byte aligned to
 287 // ensure that it does not span a cache line so that it can be patched.
 288 int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
 289   current_offset += pre_call_resets_size();  // skip fldcw, if any
 290   current_offset += preserve_SP_size();   // skip mov rbp, rsp
 291   current_offset += 1;      // skip call opcode byte
 292   return round_to(current_offset, alignment_required()) - current_offset;
 293 }
 294 
 295 // The address of the call instruction needs to be 4-byte aligned to
 296 // ensure that it does not span a cache line so that it can be patched.
 297 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
 298   current_offset += pre_call_resets_size();  // skip fldcw, if any
 299   current_offset += 5;      // skip MOV instruction
 300   current_offset += 1;      // skip call opcode byte
 301   return round_to(current_offset, alignment_required()) - current_offset;
 302 }
 303 
 304 // EMIT_RM()
 305 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
 306   unsigned char c = (unsigned char)((f1 << 6) | (f2 << 3) | f3);
 307   cbuf.insts()->emit_int8(c);
 308 }
 309 
 310 // EMIT_CC()
 311 void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
 312   unsigned char c = (unsigned char)( f1 | f2 );
 313   cbuf.insts()->emit_int8(c);
 314 }
 315 
 316 // EMIT_OPCODE()
 317 void emit_opcode(CodeBuffer &cbuf, int code) {
 318   cbuf.insts()->emit_int8((unsigned char) code);
 319 }
 320 
 321 // EMIT_OPCODE() w/ relocation information
 322 void emit_opcode(CodeBuffer &cbuf, int code, relocInfo::relocType reloc, int offset = 0) {
 323   cbuf.relocate(cbuf.insts_mark() + offset, reloc);
 324   emit_opcode(cbuf, code);
 325 }
 326 
 327 // EMIT_D8()
 328 void emit_d8(CodeBuffer &cbuf, int d8) {
 329   cbuf.insts()->emit_int8((unsigned char) d8);
 330 }
 331 
 332 // EMIT_D16()
 333 void emit_d16(CodeBuffer &cbuf, int d16) {
 334   cbuf.insts()->emit_int16(d16);
 335 }
 336 
 337 // EMIT_D32()
 338 void emit_d32(CodeBuffer &cbuf, int d32) {
 339   cbuf.insts()->emit_int32(d32);
 340 }
 341 
 342 // emit 32 bit value and construct relocation entry from relocInfo::relocType
 343 void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
 344         int format) {
 345   cbuf.relocate(cbuf.insts_mark(), reloc, format);
 346   cbuf.insts()->emit_int32(d32);
 347 }
 348 
 349 // emit 32 bit value and construct relocation entry from RelocationHolder
 350 void emit_d32_reloc(CodeBuffer &cbuf, int d32, RelocationHolder const& rspec,
 351         int format) {
 352 #ifdef ASSERT
 353   if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
 354     assert(cast_to_oop(d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
 355   }
 356 #endif
 357   cbuf.relocate(cbuf.insts_mark(), rspec, format);
 358   cbuf.insts()->emit_int32(d32);
 359 }
 360 
 361 // Access stack slot for load or store
 362 void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp) {
 363   emit_opcode( cbuf, opcode );               // (e.g., FILD   [ESP+src])
 364   if( -128 <= disp && disp <= 127 ) {
 365     emit_rm( cbuf, 0x01, rm_field, ESP_enc );  // R/M byte
 366     emit_rm( cbuf, 0x00, ESP_enc, ESP_enc);    // SIB byte
 367     emit_d8 (cbuf, disp);     // Displacement  // R/M byte
 368   } else {
 369     emit_rm( cbuf, 0x02, rm_field, ESP_enc );  // R/M byte
 370     emit_rm( cbuf, 0x00, ESP_enc, ESP_enc);    // SIB byte
 371     emit_d32(cbuf, disp);     // Displacement  // R/M byte
 372   }
 373 }
 374 
 375 // Emit the ModRM/SIB/displacement bytes for a register/memory operand (emit_reg_mem: rRegI ereg, memory mem).
 376 void encode_RegMem( CodeBuffer &cbuf, int reg_encoding, int base, int index, int scale, int displace, relocInfo::relocType disp_reloc ) {
 377   // If there is no index and no scale, use the form without a SIB byte
 378   if ((index == 0x4) &&
 379       (scale == 0) && (base != ESP_enc)) {
 380     // If no displacement, mode is 0x0; unless base is [EBP]
 381     if ( (displace == 0) && (base != EBP_enc) ) {
 382       emit_rm(cbuf, 0x0, reg_encoding, base);
 383     }
 384     else {                    // If 8-bit displacement, mode 0x1
 385       if ((displace >= -128) && (displace <= 127)
 386           && (disp_reloc == relocInfo::none) ) {
 387         emit_rm(cbuf, 0x1, reg_encoding, base);
 388         emit_d8(cbuf, displace);
 389       }
 390       else {                  // If 32-bit displacement
 391         if (base == -1) { // Special flag for absolute address
 392           emit_rm(cbuf, 0x0, reg_encoding, 0x5);
 393           // (manual lies; no SIB needed here)
 394           if ( disp_reloc != relocInfo::none ) {
 395             emit_d32_reloc(cbuf, displace, disp_reloc, 1);
 396           } else {
 397             emit_d32      (cbuf, displace);
 398           }
 399         }
 400         else {                // Normal base + offset
 401           emit_rm(cbuf, 0x2, reg_encoding, base);
 402           if ( disp_reloc != relocInfo::none ) {
 403             emit_d32_reloc(cbuf, displace, disp_reloc, 1);
 404           } else {
 405             emit_d32      (cbuf, displace);
 406           }
 407         }
 408       }
 409     }
 410   }
 411   else {                      // Else, encode with the SIB byte
 412     // If no displacement, mode is 0x0; unless base is [EBP]
 413     if (displace == 0 && (base != EBP_enc)) {  // If no displacement
 414       emit_rm(cbuf, 0x0, reg_encoding, 0x4);
 415       emit_rm(cbuf, scale, index, base);
 416     }
 417     else {                    // If 8-bit displacement, mode 0x1
 418       if ((displace >= -128) && (displace <= 127)
 419           && (disp_reloc == relocInfo::none) ) {
 420         emit_rm(cbuf, 0x1, reg_encoding, 0x4);
 421         emit_rm(cbuf, scale, index, base);
 422         emit_d8(cbuf, displace);
 423       }
 424       else {                  // If 32-bit displacement
 425         if (base == 0x04 ) {
 426           emit_rm(cbuf, 0x2, reg_encoding, 0x4);
 427           emit_rm(cbuf, scale, index, 0x04);
 428         } else {
 429           emit_rm(cbuf, 0x2, reg_encoding, 0x4);
 430           emit_rm(cbuf, scale, index, base);
 431         }
 432         if ( disp_reloc != relocInfo::none ) {
 433           emit_d32_reloc(cbuf, displace, disp_reloc, 1);
 434         } else {
 435           emit_d32      (cbuf, displace);
 436         }
 437       }
 438     }
 439   }
 440 }
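     // Worked example (illustrative call, not part of any encoding below):
     // encoding EAX against [ESP+8] takes the SIB path with an 8-bit displacement:
     //
     //   emit_opcode  (cbuf, 0x8B);                                   // MOV r32,r/m32
     //   encode_RegMem(cbuf, EAX_enc, ESP_enc, 0x4, 0, 8, relocInfo::none);
     //
     // which emits ModRM 0x44 (mod=01, reg=EAX, rm=100), SIB 0x24 (scale=0,
     // index=none, base=ESP) and disp8 0x08 -- i.e. 8B 44 24 08, MOV EAX,[ESP+8].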
 441 
 442 
 443 void encode_Copy( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
 444   if( dst_encoding == src_encoding ) {
 445     // reg-reg copy, use an empty encoding
 446   } else {
 447     emit_opcode( cbuf, 0x8B );
 448     emit_rm(cbuf, 0x3, dst_encoding, src_encoding );
 449   }
 450 }
 451 
 452 void emit_cmpfp_fixup(MacroAssembler& _masm) {
 453   Label exit;
 454   __ jccb(Assembler::noParity, exit);
 455   __ pushf();
 456   //
 457   // comiss/ucomiss instructions set ZF,PF,CF flags and
 458   // zero OF,AF,SF for NaN values.
 459   // Fixup flags by zeroing ZF,PF so that compare of NaN
 460   // values returns 'less than' result (CF is set).
 461   // Leave the rest of flags unchanged.
 462   //
 463   //    7 6 5 4 3 2 1 0
 464   //   |S|Z|r|A|r|P|r|C|  (r - reserved bit)
 465   //    0 0 1 0 1 0 1 1   (0x2B)
 466   //
 467   __ andl(Address(rsp, 0), 0xffffff2b);
 468   __ popf();
 469   __ bind(exit);
 470 }
 471 
 472 void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
 473   Label done;
 474   __ movl(dst, -1);
 475   __ jcc(Assembler::parity, done);
 476   __ jcc(Assembler::below, done);
 477   __ setb(Assembler::notEqual, dst);
 478   __ movzbl(dst, dst);
 479   __ bind(done);
 480 }
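     // Assuming it is emitted right after a comiss/ucomiss-style compare (as
     // emit_cmpfp_fixup above assumes), the value left in 'dst' follows the
     // usual three-way convention: -1 if the operands were unordered (NaN) or
     // the first was below the second, 0 if equal, and 1 if above.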
 481 
 482 
 483 //=============================================================================
 484 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 485 
 486 int Compile::ConstantTable::calculate_table_base_offset() const {
 487   return 0;  // absolute addressing, no offset
 488 }
 489 
 490 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
 491 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
 492   ShouldNotReachHere();
 493 }
 494 
 495 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
 496   // Empty encoding
 497 }
 498 
 499 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
 500   return 0;
 501 }
 502 
 503 #ifndef PRODUCT
 504 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 505   st->print("# MachConstantBaseNode (empty encoding)");
 506 }
 507 #endif
 508 
 509 
 510 //=============================================================================
 511 #ifndef PRODUCT
 512 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 513   Compile* C = ra_->C;
 514 
 515   int framesize = C->frame_size_in_bytes();
 516   int bangsize = C->bang_size_in_bytes();
 517   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 518   // Remove wordSize for return addr which is already pushed.
 519   framesize -= wordSize;
 520 
 521   if (C->need_stack_bang(bangsize)) {
 522     framesize -= wordSize;
 523     st->print("# stack bang (%d bytes)", bangsize);
 524     st->print("\n\t");
 525     st->print("PUSH   EBP\t# Save EBP");
 526     if (framesize) {
 527       st->print("\n\t");
 528       st->print("SUB    ESP, #%d\t# Create frame",framesize);
 529     }
 530   } else {
 531     st->print("SUB    ESP, #%d\t# Create frame",framesize);
 532     st->print("\n\t");
 533     framesize -= wordSize;
 534     st->print("MOV    [ESP + #%d], EBP\t# Save EBP",framesize);
 535   }
 536 
 537   if (VerifyStackAtCalls) {
 538     st->print("\n\t");
 539     framesize -= wordSize;
 540     st->print("MOV    [ESP + #%d], 0xBADB100D\t# Majik cookie for stack depth check",framesize);
 541   }
 542 
 543   if( C->in_24_bit_fp_mode() ) {
 544     st->print("\n\t");
 545     st->print("FLDCW  \t# load 24 bit fpu control word");
 546   }
 547   if (UseSSE >= 2 && VerifyFPU) {
 548     st->print("\n\t");
 549     st->print("# verify FPU stack (must be clean on entry)");
 550   }
 551 
 552 #ifdef ASSERT
 553   if (VerifyStackAtCalls) {
 554     st->print("\n\t");
 555     st->print("# stack alignment check");
 556   }
 557 #endif
 558   st->cr();
 559 }
 560 #endif
 561 
 562 
 563 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 564   Compile* C = ra_->C;
 565   MacroAssembler _masm(&cbuf);
 566 
 567   int framesize = C->frame_size_in_bytes();
 568   int bangsize = C->bang_size_in_bytes();
 569 
 570   __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, C->in_24_bit_fp_mode());
 571 
 572   C->set_frame_complete(cbuf.insts_size());
 573 
 574   if (C->has_mach_constant_base_node()) {
 575     // NOTE: We set the table base offset here because constant table users
 576     // might be emitted before MachConstantBaseNode.
 577     Compile::ConstantTable& constant_table = C->constant_table();
 578     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
 579   }
 580 }
 581 
 582 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
 583   return MachNode::size(ra_); // too many variables; just compute it the hard way
 584 }
 585 
 586 int MachPrologNode::reloc() const {
 587   return 0; // a large enough number
 588 }
 589 
 590 //=============================================================================
 591 #ifndef PRODUCT
 592 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
 593   Compile *C = ra_->C;
 594   int framesize = C->frame_size_in_bytes();
 595   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 596   // Remove two words for the return addr and EBP.
 597   framesize -= 2*wordSize;
 598 
 599   if (C->max_vector_size() > 16) {
 600     st->print("VZEROUPPER");
 601     st->cr(); st->print("\t");
 602   }
 603   if (C->in_24_bit_fp_mode()) {
 604     st->print("FLDCW  standard control word");
 605     st->cr(); st->print("\t");
 606   }
 607   if (framesize) {
 608     st->print("ADD    ESP,%d\t# Destroy frame",framesize);
 609     st->cr(); st->print("\t");
 610   }
 611   st->print_cr("POPL   EBP"); st->print("\t");
 612   if (do_polling() && C->is_method_compilation()) {
 613     st->print("TEST   PollPage,EAX\t! Poll Safepoint");
 614     st->cr(); st->print("\t");
 615   }
 616 }
 617 #endif
 618 
 619 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 620   Compile *C = ra_->C;
 621 
 622   if (C->max_vector_size() > 16) {
 623     // Clear upper bits of YMM registers when current compiled code uses
 624     // wide vectors to avoid AVX <-> SSE transition penalty during call.
 625     MacroAssembler masm(&cbuf);
 626     masm.vzeroupper();
 627   }
 628   // If method set FPU control word, restore to standard control word
 629   if (C->in_24_bit_fp_mode()) {
 630     MacroAssembler masm(&cbuf);
 631     masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
 632   }
 633 
 634   int framesize = C->frame_size_in_bytes();
 635   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 636   // Remove two words for the return addr and EBP.
 637   framesize -= 2*wordSize;
 638 
 639   // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
 640 
 641   if (framesize >= 128) {
 642     emit_opcode(cbuf, 0x81); // add  SP, #framesize
 643     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
 644     emit_d32(cbuf, framesize);
 645   } else if (framesize) {
 646     emit_opcode(cbuf, 0x83); // add  SP, #framesize
 647     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
 648     emit_d8(cbuf, framesize);
 649   }
 650 
 651   emit_opcode(cbuf, 0x58 | EBP_enc);
 652 
 653   if (do_polling() && C->is_method_compilation()) {
 654     cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0);
 655     emit_opcode(cbuf,0x85);
 656     emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX
 657     emit_d32(cbuf, (intptr_t)os::get_polling_page());
 658   }
 659 }
 660 
 661 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 662   Compile *C = ra_->C;
 663   // If method set FPU control word, restore to standard control word
 664   int size = C->in_24_bit_fp_mode() ? 6 : 0;
 665   if (C->max_vector_size() > 16) size += 3; // vzeroupper
 666   if (do_polling() && C->is_method_compilation()) size += 6;
 667 
 668   int framesize = C->frame_size_in_bytes();
 669   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 670   // Remove two words for the return addr and EBP.
 671   framesize -= 2*wordSize;
 672 
 673   size++; // popl EBP
 674 
 675   if (framesize >= 128) {
 676     size += 6;
 677   } else {
 678     size += framesize ? 3 : 0;
 679   }
 680   return size;
 681 }
 682 
 683 int MachEpilogNode::reloc() const {
 684   return 0; // a large enough number
 685 }
 686 
 687 const Pipeline * MachEpilogNode::pipeline() const {
 688   return MachNode::pipeline_class();
 689 }
 690 
 691 int MachEpilogNode::safepoint_offset() const { return 0; }
 692 
 693 //=============================================================================
 694 
 695 enum RC { rc_bad, rc_int, rc_float, rc_xmm, rc_stack };
 696 static enum RC rc_class( OptoReg::Name reg ) {
 697 
 698   if( !OptoReg::is_valid(reg)  ) return rc_bad;
 699   if (OptoReg::is_stack(reg)) return rc_stack;
 700 
 701   VMReg r = OptoReg::as_VMReg(reg);
 702   if (r->is_Register()) return rc_int;
 703   if (r->is_FloatRegister()) {
 704     assert(UseSSE < 2, "shouldn't be used in SSE2+ mode");
 705     return rc_float;
 706   }
 707   assert(r->is_XMMRegister(), "must be");
 708   return rc_xmm;
 709 }
 710 
 711 static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset, int reg,
 712                         int opcode, const char *op_str, int size, outputStream* st ) {
 713   if( cbuf ) {
 714     emit_opcode  (*cbuf, opcode );
 715     encode_RegMem(*cbuf, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, relocInfo::none);
 716 #ifndef PRODUCT
 717   } else if( !do_size ) {
 718     if( size != 0 ) st->print("\n\t");
 719     if( opcode == 0x8B || opcode == 0x89 ) { // MOV
 720       if( is_load ) st->print("%s   %s,[ESP + #%d]",op_str,Matcher::regName[reg],offset);
 721       else          st->print("%s   [ESP + #%d],%s",op_str,offset,Matcher::regName[reg]);
 722     } else { // FLD, FST, PUSH, POP
 723       st->print("%s [ESP + #%d]",op_str,offset);
 724     }
 725 #endif
 726   }
 727   int offset_size = (offset == 0) ? 0 : ((offset <= 127) ? 1 : 4);
 728   return size+3+offset_size;
 729 }
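     // Size accounting example: a MOV EAX,[ESP+8] spill access is 8B 44 24 08,
     // i.e. opcode + ModRM + SIB (the constant 3 above) plus a 1-byte
     // displacement; a zero offset drops the displacement byte entirely and an
     // offset above 127 widens it to 4 bytes, matching offset_size.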
 730 
 731 // Helper for XMM registers.  Extra opcode bits, limited syntax.
 732 static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
 733                          int offset, int reg_lo, int reg_hi, int size, outputStream* st ) {
 734   if (cbuf) {
 735     MacroAssembler _masm(cbuf);
 736     if (reg_lo+1 == reg_hi) { // double move?
 737       if (is_load) {
 738         __ movdbl(as_XMMRegister(Matcher::_regEncode[reg_lo]), Address(rsp, offset));
 739       } else {
 740         __ movdbl(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[reg_lo]));
 741       }
 742     } else {
 743       if (is_load) {
 744         __ movflt(as_XMMRegister(Matcher::_regEncode[reg_lo]), Address(rsp, offset));
 745       } else {
 746         __ movflt(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[reg_lo]));
 747       }
 748     }
 749 #ifndef PRODUCT
 750   } else if (!do_size) {
 751     if (size != 0) st->print("\n\t");
 752     if (reg_lo+1 == reg_hi) { // double move?
 753       if (is_load) st->print("%s %s,[ESP + #%d]",
 754                               UseXmmLoadAndClearUpper ? "MOVSD " : "MOVLPD",
 755                               Matcher::regName[reg_lo], offset);
 756       else         st->print("MOVSD  [ESP + #%d],%s",
 757                               offset, Matcher::regName[reg_lo]);
 758     } else {
 759       if (is_load) st->print("MOVSS  %s,[ESP + #%d]",
 760                               Matcher::regName[reg_lo], offset);
 761       else         st->print("MOVSS  [ESP + #%d],%s",
 762                               offset, Matcher::regName[reg_lo]);
 763     }
 764 #endif
 765   }
 766   int offset_size = (offset == 0) ? 0 : ((offset <= 127) ? 1 : 4);
 767   // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
 768   return size+5+offset_size;
 769 }
 770 
 771 
 772 static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 773                             int src_hi, int dst_hi, int size, outputStream* st ) {
 774   if (cbuf) {
 775     MacroAssembler _masm(cbuf);
 776     if (src_lo+1 == src_hi && dst_lo+1 == dst_hi) { // double move?
 777       __ movdbl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 778                 as_XMMRegister(Matcher::_regEncode[src_lo]));
 779     } else {
 780       __ movflt(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 781                 as_XMMRegister(Matcher::_regEncode[src_lo]));
 782     }
 783 #ifndef PRODUCT
 784   } else if (!do_size) {
 785     if (size != 0) st->print("\n\t");
 786     if (UseXmmRegToRegMoveAll) {//Use movaps,movapd to move between xmm registers
 787       if (src_lo+1 == src_hi && dst_lo+1 == dst_hi) { // double move?
 788         st->print("MOVAPD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 789       } else {
 790         st->print("MOVAPS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 791       }
 792     } else {
 793       if( src_lo+1 == src_hi && dst_lo+1 == dst_hi ) { // double move?
 794         st->print("MOVSD  %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 795       } else {
 796         st->print("MOVSS  %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 797       }
 798     }
 799 #endif
 800   }
 801   // VEX_2bytes prefix is used if UseAVX > 0, and it takes the same 2 bytes as SIMD prefix.
 802   // Only MOVAPS SSE prefix uses 1 byte.
 803   int sz = 4;
 804   if (!(src_lo+1 == src_hi && dst_lo+1 == dst_hi) &&
 805       UseXmmRegToRegMoveAll && (UseAVX == 0)) sz = 3;
 806   return size + sz;
 807 }
 808 
 809 static int impl_movgpr2x_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 810                             int src_hi, int dst_hi, int size, outputStream* st ) {
 811   // 32-bit
 812   if (cbuf) {
 813     MacroAssembler _masm(cbuf);
 814     __ movdl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 815              as_Register(Matcher::_regEncode[src_lo]));
 816 #ifndef PRODUCT
 817   } else if (!do_size) {
 818     st->print("movdl   %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
 819 #endif
 820   }
 821   return 4;
 822 }
 823 
 824 
 825 static int impl_movx2gpr_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 826                                  int src_hi, int dst_hi, int size, outputStream* st ) {
 827   // 32-bit
 828   if (cbuf) {
 829     MacroAssembler _masm(cbuf);
 830     __ movdl(as_Register(Matcher::_regEncode[dst_lo]),
 831              as_XMMRegister(Matcher::_regEncode[src_lo]));
 832 #ifndef PRODUCT
 833   } else if (!do_size) {
 834     st->print("movdl   %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
 835 #endif
 836   }
 837   return 4;
 838 }
 839 
 840 static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size, outputStream* st ) {
 841   if( cbuf ) {
 842     emit_opcode(*cbuf, 0x8B );
 843     emit_rm    (*cbuf, 0x3, Matcher::_regEncode[dst], Matcher::_regEncode[src] );
 844 #ifndef PRODUCT
 845   } else if( !do_size ) {
 846     if( size != 0 ) st->print("\n\t");
 847     st->print("MOV    %s,%s",Matcher::regName[dst],Matcher::regName[src]);
 848 #endif
 849   }
 850   return size+2;
 851 }
 852 
 853 static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi,
 854                                  int offset, int size, outputStream* st ) {
 855   if( src_lo != FPR1L_num ) {      // Move value to top of FP stack, if not already there
 856     if( cbuf ) {
 857       emit_opcode( *cbuf, 0xD9 );  // FLD (i.e., push it)
 858       emit_d8( *cbuf, 0xC0-1+Matcher::_regEncode[src_lo] );
 859 #ifndef PRODUCT
 860     } else if( !do_size ) {
 861       if( size != 0 ) st->print("\n\t");
 862       st->print("FLD    %s",Matcher::regName[src_lo]);
 863 #endif
 864     }
 865     size += 2;
 866   }
 867 
 868   int st_op = (src_lo != FPR1L_num) ? EBX_num /*store & pop*/ : EDX_num /*store no pop*/;
 869   const char *op_str;
 870   int op;
 871   if( src_lo+1 == src_hi && dst_lo+1 == dst_hi ) { // double store?
 872     op_str = (src_lo != FPR1L_num) ? "FSTP_D" : "FST_D ";
 873     op = 0xDD;
 874   } else {                   // 32-bit store
 875     op_str = (src_lo != FPR1L_num) ? "FSTP_S" : "FST_S ";
 876     op = 0xD9;
 877     assert( !OptoReg::is_valid(src_hi) && !OptoReg::is_valid(dst_hi), "no non-adjacent float-stores" );
 878   }
 879 
 880   return impl_helper(cbuf,do_size,false,offset,st_op,op,op_str,size, st);
 881 }
 882 
 883 // Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
 884 static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 885                           int src_hi, int dst_hi, uint ireg, outputStream* st);
 886 
 887 static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
 888                             int stack_offset, int reg, uint ireg, outputStream* st);
 889 
 890 static int vec_stack_to_stack_helper(CodeBuffer *cbuf, bool do_size, int src_offset,
 891                                      int dst_offset, uint ireg, outputStream* st) {
 892   int calc_size = 0;
 893   int src_offset_size = (src_offset == 0) ? 0 : ((src_offset < 0x80) ? 1 : 4);
 894   int dst_offset_size = (dst_offset == 0) ? 0 : ((dst_offset < 0x80) ? 1 : 4);
 895   switch (ireg) {
 896   case Op_VecS:
 897     calc_size = 3+src_offset_size + 3+dst_offset_size;
 898     break;
 899   case Op_VecD:
 900     calc_size = 3+src_offset_size + 3+dst_offset_size;
 901     src_offset += 4;
 902     dst_offset += 4;
 903     src_offset_size = (src_offset == 0) ? 0 : ((src_offset < 0x80) ? 1 : 4);
 904     dst_offset_size = (dst_offset == 0) ? 0 : ((dst_offset < 0x80) ? 1 : 4);
 905     calc_size += 3+src_offset_size + 3+dst_offset_size;
 906     break;
 907   case Op_VecX:
 908     calc_size = 6 + 6 + 5+src_offset_size + 5+dst_offset_size;
 909     break;
 910   case Op_VecY:
 911     calc_size = 6 + 6 + 5+src_offset_size + 5+dst_offset_size;
 912     break;
 913   default:
 914     ShouldNotReachHere();
 915   }
 916   if (cbuf) {
 917     MacroAssembler _masm(cbuf);
 918     int offset = __ offset();
 919     switch (ireg) {
 920     case Op_VecS:
 921       __ pushl(Address(rsp, src_offset));
 922       __ popl (Address(rsp, dst_offset));
 923       break;
 924     case Op_VecD:
 925       __ pushl(Address(rsp, src_offset));
 926       __ popl (Address(rsp, dst_offset));
 927       __ pushl(Address(rsp, src_offset+4));
 928       __ popl (Address(rsp, dst_offset+4));
 929       break;
 930     case Op_VecX:
 931       __ movdqu(Address(rsp, -16), xmm0);
 932       __ movdqu(xmm0, Address(rsp, src_offset));
 933       __ movdqu(Address(rsp, dst_offset), xmm0);
 934       __ movdqu(xmm0, Address(rsp, -16));
 935       break;
 936     case Op_VecY:
 937       __ vmovdqu(Address(rsp, -32), xmm0);
 938       __ vmovdqu(xmm0, Address(rsp, src_offset));
 939       __ vmovdqu(Address(rsp, dst_offset), xmm0);
 940       __ vmovdqu(xmm0, Address(rsp, -32));
 941       break;
 942     default:
 943       ShouldNotReachHere();
 944     }
 945     int size = __ offset() - offset;
 946     assert(size == calc_size, "incorrect size calculation");
 947     return size;
 948 #ifndef PRODUCT
 949   } else if (!do_size) {
 950     switch (ireg) {
 951     case Op_VecS:
 952       st->print("pushl   [rsp + #%d]\t# 32-bit mem-mem spill\n\t"
 953                 "popl    [rsp + #%d]",
 954                 src_offset, dst_offset);
 955       break;
 956     case Op_VecD:
 957       st->print("pushl   [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
 958                 "popl    [rsp + #%d]\n\t"
 959                 "pushl   [rsp + #%d]\n\t"
 960                 "popl    [rsp + #%d]",
 961                 src_offset, dst_offset, src_offset+4, dst_offset+4);
 962       break;
 963      case Op_VecX:
 964       st->print("movdqu  [rsp - #16], xmm0\t# 128-bit mem-mem spill\n\t"
 965                 "movdqu  xmm0, [rsp + #%d]\n\t"
 966                 "movdqu  [rsp + #%d], xmm0\n\t"
 967                 "movdqu  xmm0, [rsp - #16]",
 968                 src_offset, dst_offset);
 969       break;
 970     case Op_VecY:
 971       st->print("vmovdqu [rsp - #32], xmm0\t# 256-bit mem-mem spill\n\t"
 972                 "vmovdqu xmm0, [rsp + #%d]\n\t"
 973                 "vmovdqu [rsp + #%d], xmm0\n\t"
 974                 "vmovdqu xmm0, [rsp - #32]",
 975                 src_offset, dst_offset);
 976       break;
 977     default:
 978       ShouldNotReachHere();
 979     }
 980 #endif
 981   }
 982   return calc_size;
 983 }
 984 
 985 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
 986   // Get registers to move
 987   OptoReg::Name src_second = ra_->get_reg_second(in(1));
 988   OptoReg::Name src_first = ra_->get_reg_first(in(1));
 989   OptoReg::Name dst_second = ra_->get_reg_second(this );
 990   OptoReg::Name dst_first = ra_->get_reg_first(this );
 991 
 992   enum RC src_second_rc = rc_class(src_second);
 993   enum RC src_first_rc = rc_class(src_first);
 994   enum RC dst_second_rc = rc_class(dst_second);
 995   enum RC dst_first_rc = rc_class(dst_first);
 996 
 997   assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
 998 
 999   // Generate spill code!
1000   int size = 0;
1001 
1002   if( src_first == dst_first && src_second == dst_second )
1003     return size;            // Self copy, no move
1004 
1005   if (bottom_type()->isa_vect() != NULL) {
1006     uint ireg = ideal_reg();
1007     assert((src_first_rc != rc_int && dst_first_rc != rc_int), "sanity");
1008     assert((src_first_rc != rc_float && dst_first_rc != rc_float), "sanity");
1009     assert((ireg == Op_VecS || ireg == Op_VecD || ireg == Op_VecX || ireg == Op_VecY), "sanity");
1010     if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1011       // mem -> mem
1012       int src_offset = ra_->reg2offset(src_first);
1013       int dst_offset = ra_->reg2offset(dst_first);
1014       return vec_stack_to_stack_helper(cbuf, do_size, src_offset, dst_offset, ireg, st);
1015     } else if (src_first_rc == rc_xmm && dst_first_rc == rc_xmm ) {
1016       return vec_mov_helper(cbuf, do_size, src_first, dst_first, src_second, dst_second, ireg, st);
1017     } else if (src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
1018       int stack_offset = ra_->reg2offset(dst_first);
1019       return vec_spill_helper(cbuf, do_size, false, stack_offset, src_first, ireg, st);
1020     } else if (src_first_rc == rc_stack && dst_first_rc == rc_xmm ) {
1021       int stack_offset = ra_->reg2offset(src_first);
1022       return vec_spill_helper(cbuf, do_size, true,  stack_offset, dst_first, ireg, st);
1023     } else {
1024       ShouldNotReachHere();
1025     }
1026   }
1027 
1028   // --------------------------------------
1029   // Check for mem-mem move.  push/pop to move.
1030   if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1031     if( src_second == dst_first ) { // overlapping stack copy ranges
1032       assert( src_second_rc == rc_stack && dst_second_rc == rc_stack, "we only expect a stk-stk copy here" );
1033       size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH  ",size, st);
1034       size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP   ",size, st);
1035       src_second_rc = dst_second_rc = rc_bad;  // flag as already moved the second bits
1036     }
1037     // move low bits
1038     size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),ESI_num,0xFF,"PUSH  ",size, st);
1039     size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),EAX_num,0x8F,"POP   ",size, st);
1040     if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) { // mov second bits
1041       size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH  ",size, st);
1042       size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP   ",size, st);
1043     }
1044     return size;
1045   }
1046 
1047   // --------------------------------------
1048   // Check for integer reg-reg copy
1049   if( src_first_rc == rc_int && dst_first_rc == rc_int )
1050     size = impl_mov_helper(cbuf,do_size,src_first,dst_first,size, st);
1051 
1052   // Check for integer store
1053   if( src_first_rc == rc_int && dst_first_rc == rc_stack )
1054     size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first,0x89,"MOV ",size, st);
1055 
1056   // Check for integer load
1057   if( dst_first_rc == rc_int && src_first_rc == rc_stack )
1058     size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size, st);
1059 
1060   // Check for integer reg-xmm reg copy
1061   if( src_first_rc == rc_int && dst_first_rc == rc_xmm ) {
1062     assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
1063             "no 64 bit integer-float reg moves" );
1064     return impl_movgpr2x_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
1065   }
1066   // --------------------------------------
1067   // Check for float reg-reg copy
1068   if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
1069     assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad) ||
1070             (src_first+1 == src_second && dst_first+1 == dst_second), "no non-adjacent float-moves" );
1071     if( cbuf ) {
1072 
1073       // Note the mucking with the register encode to compensate for the 0/1
1074       // indexing issue mentioned in a comment in the reg_def sections
1075       // for FPR registers many lines above here.
1076 
1077       if( src_first != FPR1L_num ) {
1078         emit_opcode  (*cbuf, 0xD9 );           // FLD    ST(i)
1079         emit_d8      (*cbuf, 0xC0+Matcher::_regEncode[src_first]-1 );
1080         emit_opcode  (*cbuf, 0xDD );           // FSTP   ST(i)
1081         emit_d8      (*cbuf, 0xD8+Matcher::_regEncode[dst_first] );
1082      } else {
1083         emit_opcode  (*cbuf, 0xDD );           // FST    ST(i)
1084         emit_d8      (*cbuf, 0xD0+Matcher::_regEncode[dst_first]-1 );
1085      }
1086 #ifndef PRODUCT
1087     } else if( !do_size ) {
1088       if( size != 0 ) st->print("\n\t");
1089       if( src_first != FPR1L_num ) st->print("FLD    %s\n\tFSTP   %s",Matcher::regName[src_first],Matcher::regName[dst_first]);
1090       else                      st->print(             "FST    %s",                            Matcher::regName[dst_first]);
1091 #endif
1092     }
1093     return size + ((src_first != FPR1L_num) ? 2+2 : 2);
1094   }
1095 
1096   // Check for float store
1097   if( src_first_rc == rc_float && dst_first_rc == rc_stack ) {
1098     return impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,ra_->reg2offset(dst_first),size, st);
1099   }
1100 
1101   // Check for float load
1102   if( dst_first_rc == rc_float && src_first_rc == rc_stack ) {
1103     int offset = ra_->reg2offset(src_first);
1104     const char *op_str;
1105     int op;
1106     if( src_first+1 == src_second && dst_first+1 == dst_second ) { // double load?
1107       op_str = "FLD_D";
1108       op = 0xDD;
1109     } else {                   // 32-bit load
1110       op_str = "FLD_S";
1111       op = 0xD9;
1112       assert( src_second_rc == rc_bad && dst_second_rc == rc_bad, "no non-adjacent float-loads" );
1113     }
1114     if( cbuf ) {
1115       emit_opcode  (*cbuf, op );
1116       encode_RegMem(*cbuf, 0x0, ESP_enc, 0x4, 0, offset, relocInfo::none);
1117       emit_opcode  (*cbuf, 0xDD );           // FSTP   ST(i)
1118       emit_d8      (*cbuf, 0xD8+Matcher::_regEncode[dst_first] );
1119 #ifndef PRODUCT
1120     } else if( !do_size ) {
1121       if( size != 0 ) st->print("\n\t");
1122       st->print("%s  ST,[ESP + #%d]\n\tFSTP   %s",op_str, offset,Matcher::regName[dst_first]);
1123 #endif
1124     }
1125     int offset_size = (offset == 0) ? 0 : ((offset <= 127) ? 1 : 4);
1126     return size + 3+offset_size+2;
1127   }
1128 
1129   // Check for xmm reg-reg copy
1130   if( src_first_rc == rc_xmm && dst_first_rc == rc_xmm ) {
1131     assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad) ||
1132             (src_first+1 == src_second && dst_first+1 == dst_second),
1133             "no non-adjacent float-moves" );
1134     return impl_movx_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
1135   }
1136 
1137   // Check for xmm reg-integer reg copy
1138   if( src_first_rc == rc_xmm && dst_first_rc == rc_int ) {
1139     assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
1140             "no 64 bit float-integer reg moves" );
1141     return impl_movx2gpr_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
1142   }
1143 
1144   // Check for xmm store
1145   if( src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
1146     return impl_x_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first, src_second, size, st);
1147   }
1148 
1149   // Check for float xmm load
1150   if( dst_first_rc == rc_xmm && src_first_rc == rc_stack ) {
1151     return impl_x_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first, dst_second, size, st);
1152   }
1153 
1154   // Copy from float reg to xmm reg
1155   if( dst_first_rc == rc_xmm && src_first_rc == rc_float ) {
1156     // copy to the top of stack from floating point reg
1157     // and use LEA to preserve flags
1158     if( cbuf ) {
1159       emit_opcode(*cbuf,0x8D);  // LEA  ESP,[ESP-8]
1160       emit_rm(*cbuf, 0x1, ESP_enc, 0x04);
1161       emit_rm(*cbuf, 0x0, 0x04, ESP_enc);
1162       emit_d8(*cbuf,0xF8);
1163 #ifndef PRODUCT
1164     } else if( !do_size ) {
1165       if( size != 0 ) st->print("\n\t");
1166       st->print("LEA    ESP,[ESP-8]");
1167 #endif
1168     }
1169     size += 4;
1170 
1171     size = impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,0,size, st);
1172 
1173     // Copy from the temp memory to the xmm reg.
1174     size = impl_x_helper(cbuf,do_size,true ,0,dst_first, dst_second, size, st);
1175 
1176     if( cbuf ) {
1177       emit_opcode(*cbuf,0x8D);  // LEA  ESP,[ESP+8]
1178       emit_rm(*cbuf, 0x1, ESP_enc, 0x04);
1179       emit_rm(*cbuf, 0x0, 0x04, ESP_enc);
1180       emit_d8(*cbuf,0x08);
1181 #ifndef PRODUCT
1182     } else if( !do_size ) {
1183       if( size != 0 ) st->print("\n\t");
1184       st->print("LEA    ESP,[ESP+8]");
1185 #endif
1186     }
1187     size += 4;
1188     return size;
1189   }
1190 
1191   assert( size > 0, "missed a case" );
1192 
1193   // --------------------------------------------------------------------
1194   // Check for second bits still needing moving.
1195   if( src_second == dst_second )
1196     return size;               // Self copy; no move
1197   assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );
1198 
1199   // Check for second word int-int move
1200   if( src_second_rc == rc_int && dst_second_rc == rc_int )
1201     return impl_mov_helper(cbuf,do_size,src_second,dst_second,size, st);
1202 
1203   // Check for second word integer store
1204   if( src_second_rc == rc_int && dst_second_rc == rc_stack )
1205     return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st);
1206 
1207   // Check for second word integer load
1208   if( dst_second_rc == rc_int && src_second_rc == rc_stack )
1209     return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st);
1210 
1211 
1212   Unimplemented();
1213 }
1214 
1215 #ifndef PRODUCT
1216 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const {
1217   implementation( NULL, ra_, false, st );
1218 }
1219 #endif
1220 
1221 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1222   implementation( &cbuf, ra_, false, NULL );
1223 }
1224 
1225 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
1226   return implementation( NULL, ra_, true, NULL );
1227 }
1228 
1229 
1230 //=============================================================================
1231 #ifndef PRODUCT
1232 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1233   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1234   int reg = ra_->get_reg_first(this);
1235   st->print("LEA    %s,[ESP + #%d]",Matcher::regName[reg],offset);
1236 }
1237 #endif
1238 
1239 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1240   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1241   int reg = ra_->get_encode(this);
1242   if( offset >= 128 ) {
1243     emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
1244     emit_rm(cbuf, 0x2, reg, 0x04);
1245     emit_rm(cbuf, 0x0, 0x04, ESP_enc);
1246     emit_d32(cbuf, offset);
1247   }
1248   else {
1249     emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
1250     emit_rm(cbuf, 0x1, reg, 0x04);
1251     emit_rm(cbuf, 0x0, 0x04, ESP_enc);
1252     emit_d8(cbuf, offset);
1253   }
1254 }
1255 
1256 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
1257   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1258   if( offset >= 128 ) {
1259     return 7;
1260   }
1261   else {
1262     return 4;
1263   }
1264 }
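     // Size accounting: LEA reg,[ESP+disp8] is opcode + ModRM + SIB + disp8 = 4
     // bytes, and the disp32 form widens the displacement to 4 bytes for a total
     // of 7, matching the two cases emitted above.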
1265 
1266 //=============================================================================
1267 #ifndef PRODUCT
1268 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1269   st->print_cr(  "CMP    EAX,[ECX+4]\t# Inline cache check");
1270   st->print_cr("\tJNE    SharedRuntime::handle_ic_miss_stub");
1271   st->print_cr("\tNOP");
1272   st->print_cr("\tNOP");
1273   if( !OptoBreakpoint )
1274     st->print_cr("\tNOP");
1275 }
1276 #endif
1277 
1278 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1279   MacroAssembler masm(&cbuf);
1280 #ifdef ASSERT
1281   uint insts_size = cbuf.insts_size();
1282 #endif
1283   masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
1284   masm.jump_cc(Assembler::notEqual,
1285                RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1286   /* WARNING these NOPs are critical so that verified entry point is properly
1287      aligned for patching by NativeJump::patch_verified_entry() */
1288   int nops_cnt = 2;
1289   if( !OptoBreakpoint ) // Leave space for int3
1290      nops_cnt += 1;
1291   masm.nop(nops_cnt);
1292 
1293   assert(cbuf.insts_size() - insts_size == size(ra_), "checking code size of inline cache node");
1294 }
1295 
1296 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
1297   return OptoBreakpoint ? 11 : 12;
1298 }
1299 
1300 
1301 //=============================================================================
1302 uint size_exception_handler() {
1303   // NativeCall instruction size is the same as NativeJump.
1304   // exception handler starts out as jump and can be patched to
1305 // a call by deoptimization.  (4932387)
1306   // Note that this value is also credited (in output.cpp) to
1307   // the size of the code section.
1308   return NativeJump::instruction_size;
1309 }
1310 
1311 // Emit exception handler code.  Stuff framesize into a register
1312 // and call a VM stub routine.
1313 int emit_exception_handler(CodeBuffer& cbuf) {
1314 
1315   // Note that the code buffer's insts_mark is always relative to insts.
1316   // That's why we must use the macroassembler to generate a handler.
1317   MacroAssembler _masm(&cbuf);
1318   address base =
1319   __ start_a_stub(size_exception_handler());
1320   if (base == NULL)  return 0;  // CodeBuffer::expand failed
1321   int offset = __ offset();
1322   __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
1323   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
1324   __ end_a_stub();
1325   return offset;
1326 }
1327 
1328 uint size_deopt_handler() {
1329   // NativeCall instruction size is the same as NativeJump.
  // exception handler starts out as a jump and can be patched to
  // a call by deoptimization.  (4932387)
1332   // Note that this value is also credited (in output.cpp) to
1333   // the size of the code section.
1334   return 5 + NativeJump::instruction_size; // pushl(); jmp;
1335 }
1336 
1337 // Emit deopt handler code.
1338 int emit_deopt_handler(CodeBuffer& cbuf) {
1339 
1340   // Note that the code buffer's insts_mark is always relative to insts.
1341   // That's why we must use the macroassembler to generate a handler.
1342   MacroAssembler _masm(&cbuf);
1343   address base =
  __ start_a_stub(size_deopt_handler());
1345   if (base == NULL)  return 0;  // CodeBuffer::expand failed
1346   int offset = __ offset();
1347   InternalAddress here(__ pc());
1348   __ pushptr(here.addr());
1349 
1350   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
1351   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
1352   __ end_a_stub();
1353   return offset;
1354 }
1355 
1356 int Matcher::regnum_to_fpu_offset(int regnum) {
1357   return regnum - 32; // The FP registers are in the second chunk
1358 }
1359 
// This hook originated on UltraSparc; returning true just means this platform has fast l2f conversion
1361 const bool Matcher::convL2FSupported(void) {
1362   return true;
1363 }
1364 
1365 // Is this branch offset short enough that a short branch can be used?
1366 //
1367 // NOTE: If the platform does not provide any short branch variants, then
1368 //       this method should return false for offset 0.
1369 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
1370   // The passed offset is relative to address of the branch.
  // On x86 a branch displacement is calculated relative to the address
  // of the next instruction.
1373   offset -= br_size;
1374 
1375   // the short version of jmpConUCF2 contains multiple branches,
1376   // making the reach slightly less
1377   if (rule == jmpConUCF2_rule)
1378     return (-126 <= offset && offset <= 125);
1379   return (-128 <= offset && offset <= 127);
1380 }
1381 
1382 const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
1384   return false;
1385 }
1386 
1387 // The ecx parameter to rep stos for the ClearArray node is in dwords.
1388 const bool Matcher::init_array_count_is_in_bytes = false;
1389 
1390 // Threshold size for cleararray.
1391 const int Matcher::init_array_short_size = 8 * BytesPerLong;
1392 
1393 // Needs 2 CMOV's for longs.
1394 const int Matcher::long_cmove_cost() { return 1; }
1395 
1396 // No CMOVF/CMOVD with SSE/SSE2
1397 const int Matcher::float_cmove_cost() { return (UseSSE>=1) ? ConditionalMoveLimit : 0; }
1398 
1399 // Does the CPU require late expand (see block.cpp for description of late expand)?
1400 const bool Matcher::require_postalloc_expand = false;
1401 
1402 // Should the Matcher clone shifts on addressing modes, expecting them to
1403 // be subsumed into complex addressing expressions or compute them into
1404 // registers?  True for Intel but false for most RISCs
1405 const bool Matcher::clone_shift_expressions = true;
1406 
1407 // Do we need to mask the count passed to shift instructions or does
1408 // the cpu only look at the lower 5/6 bits anyway?
1409 const bool Matcher::need_masked_shift_count = false;
1410 
1411 bool Matcher::narrow_oop_use_complex_address() {
1412   ShouldNotCallThis();
1413   return true;
1414 }
1415 
1416 bool Matcher::narrow_klass_use_complex_address() {
1417   ShouldNotCallThis();
1418   return true;
1419 }
1420 
1421 
1422 // Is it better to copy float constants, or load them directly from memory?
1423 // Intel can load a float constant from a direct address, requiring no
1424 // extra registers.  Most RISCs will have to materialize an address into a
1425 // register first, so they would do better to copy the constant from stack.
1426 const bool Matcher::rematerialize_float_constants = true;
1427 
1428 // If CPU can load and store mis-aligned doubles directly then no fixup is
1429 // needed.  Else we split the double into 2 integer pieces and move it
1430 // piece-by-piece.  Only happens when passing doubles into C code as the
1431 // Java calling convention forces doubles to be aligned.
1432 const bool Matcher::misaligned_doubles_ok = true;
1433 
1434 
1435 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
1436   // Get the memory operand from the node
1437   uint numopnds = node->num_opnds();        // Virtual call for number of operands
1438   uint skipped  = node->oper_input_base();  // Sum of leaves skipped so far
1439   assert( idx >= skipped, "idx too low in pd_implicit_null_fixup" );
1440   uint opcnt     = 1;                 // First operand
1441   uint num_edges = node->_opnds[1]->num_edges(); // leaves for first operand
1442   while( idx >= skipped+num_edges ) {
1443     skipped += num_edges;
1444     opcnt++;                          // Bump operand count
1445     assert( opcnt < numopnds, "Accessing non-existent operand" );
1446     num_edges = node->_opnds[opcnt]->num_edges(); // leaves for next operand
1447   }
1448 
1449   MachOper *memory = node->_opnds[opcnt];
1450   MachOper *new_memory = NULL;
1451   switch (memory->opcode()) {
1452   case DIRECT:
1453   case INDOFFSET32X:
1454     // No transformation necessary.
1455     return;
1456   case INDIRECT:
1457     new_memory = new (C) indirect_win95_safeOper( );
1458     break;
1459   case INDOFFSET8:
1460     new_memory = new (C) indOffset8_win95_safeOper(memory->disp(NULL, NULL, 0));
1461     break;
1462   case INDOFFSET32:
1463     new_memory = new (C) indOffset32_win95_safeOper(memory->disp(NULL, NULL, 0));
1464     break;
1465   case INDINDEXOFFSET:
1466     new_memory = new (C) indIndexOffset_win95_safeOper(memory->disp(NULL, NULL, 0));
1467     break;
1468   case INDINDEXSCALE:
1469     new_memory = new (C) indIndexScale_win95_safeOper(memory->scale());
1470     break;
1471   case INDINDEXSCALEOFFSET:
1472     new_memory = new (C) indIndexScaleOffset_win95_safeOper(memory->scale(), memory->disp(NULL, NULL, 0));
1473     break;
1474   case LOAD_LONG_INDIRECT:
1475   case LOAD_LONG_INDOFFSET32:
    // Does not use EBP as an address register; uses { EDX, EBX, EDI, ESI }
1477     return;
1478   default:
1479     assert(false, "unexpected memory operand in pd_implicit_null_fixup()");
1480     return;
1481   }
1482   node->_opnds[opcnt] = new_memory;
1483 }
1484 
1485 // Advertise here if the CPU requires explicit rounding operations
1486 // to implement the UseStrictFP mode.
1487 const bool Matcher::strict_fp_requires_explicit_rounding = true;
1488 
// Are floats converted to doubles when stored to the stack during deoptimization?
// On 32-bit x86 they are stored with conversion only when the FPU is used for floats.
1491 bool Matcher::float_in_double() { return (UseSSE == 0); }
1492 
1493 // Do ints take an entire long register or just half?
1494 const bool Matcher::int_in_long = false;
1495 
1496 // Return whether or not this register is ever used as an argument.  This
1497 // function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
1500 bool Matcher::can_be_java_arg( int reg ) {
1501   if(  reg == ECX_num   || reg == EDX_num   ) return true;
1502   if( (reg == XMM0_num  || reg == XMM1_num ) && UseSSE>=1 ) return true;
1503   if( (reg == XMM0b_num || reg == XMM1b_num) && UseSSE>=2 ) return true;
1504   return false;
1505 }
1506 
1507 bool Matcher::is_spillable_arg( int reg ) {
1508   return can_be_java_arg(reg);
1509 }
1510 
1511 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  // Use the hardware integer DIV instruction when
  // it is faster than code which uses a multiply.
  // Only when the constant divisor fits into 32 bits
  // (min_jint is excluded so that only correct positive
  // 32-bit values are obtained from negative divisors).
1517   return VM_Version::has_fast_idiv() &&
1518          (divisor == (int)divisor && divisor != min_jint);
1519 }
1520 
1521 // Register for DIVI projection of divmodI
1522 RegMask Matcher::divI_proj_mask() {
1523   return EAX_REG_mask();
1524 }
1525 
1526 // Register for MODI projection of divmodI
1527 RegMask Matcher::modI_proj_mask() {
1528   return EDX_REG_mask();
1529 }
1530 
1531 // Register for DIVL projection of divmodL
1532 RegMask Matcher::divL_proj_mask() {
1533   ShouldNotReachHere();
1534   return RegMask();
1535 }
1536 
1537 // Register for MODL projection of divmodL
1538 RegMask Matcher::modL_proj_mask() {
1539   ShouldNotReachHere();
1540   return RegMask();
1541 }
1542 
1543 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
1544   return EBP_REG_mask();
1545 }
1546 
// Returns true if the high 32 bits of the value are known to be zero.
1548 bool is_operand_hi32_zero(Node* n) {
1549   int opc = n->Opcode();
1550   if (opc == Op_AndL) {
1551     Node* o2 = n->in(2);
1552     if (o2->is_Con() && (o2->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
1553       return true;
1554     }
1555   }
1556   if (opc == Op_ConL && (n->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
1557     return true;
1558   }
1559   return false;
1560 }
1561 
1562 %}
1563 
1564 //----------ENCODING BLOCK-----------------------------------------------------
1565 // This block specifies the encoding classes used by the compiler to output
1566 // byte streams.  Encoding classes generate functions which are called by
1567 // Machine Instruction Nodes in order to generate the bit encoding of the
1568 // instruction.  Operands specify their base encoding interface with the
// interface keyword.  Four interfaces are currently supported:
1570 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
1571 // operand to generate a function which returns its register number when
1572 // queried.   CONST_INTER causes an operand to generate a function which
1573 // returns the value of the constant when queried.  MEMORY_INTER causes an
1574 // operand to generate four functions which return the Base Register, the
1575 // Index Register, the Scale Value, and the Offset Value of the operand when
1576 // queried.  COND_INTER causes an operand to generate six functions which
1577 // return the encoding code (ie - encoding bits for the instruction)
1578 // associated with each basic boolean condition for a conditional instruction.
1579 // Instructions specify two basic values for encoding.  They use the
1580 // ins_encode keyword to specify their encoding class (which must be one of
1581 // the class names specified in the encoding block), and they use the
1582 // opcode keyword to specify, in order, their primary, secondary, and
1583 // tertiary opcode.  Only the opcode sections which a particular instruction
1584 // needs for encoding need to be specified.
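// For example (illustrative usage, not a definition): an ADD of two 32-bit
// registers can specify
//   opcode(0x03);                          // 0x03 is ADD r32,r/m32
//   ins_encode( OpcP, RegReg( dst, src ) );
// which emits the primary opcode byte followed by a mod/rm byte naming both
// registers, using the OpcP and RegReg encoding classes defined below.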
1585 encode %{
1586   // Build emit functions for each basic byte or larger field in the intel
1587   // encoding scheme (opcode, rm, sib, immediate), and call them from C++
1588   // code in the enc_class source block.  Emit functions will live in the
1589   // main source block for now.  In future, we can generalize this by
1590   // adding a syntax that specifies the sizes of fields in an order,
1591   // so that the adlc can build the emit functions automagically
1592 
1593   // Emit primary opcode
1594   enc_class OpcP %{
1595     emit_opcode(cbuf, $primary);
1596   %}
1597 
1598   // Emit secondary opcode
1599   enc_class OpcS %{
1600     emit_opcode(cbuf, $secondary);
1601   %}
1602 
1603   // Emit opcode directly
1604   enc_class Opcode(immI d8) %{
1605     emit_opcode(cbuf, $d8$$constant);
1606   %}
1607 
1608   enc_class SizePrefix %{
1609     emit_opcode(cbuf,0x66);
1610   %}
1611 
1612   enc_class RegReg (rRegI dst, rRegI src) %{    // RegReg(Many)
1613     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
1614   %}
1615 
1616   enc_class OpcRegReg (immI opcode, rRegI dst, rRegI src) %{    // OpcRegReg(Many)
1617     emit_opcode(cbuf,$opcode$$constant);
1618     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
1619   %}
1620 
1621   enc_class mov_r32_imm0( rRegI dst ) %{
1622     emit_opcode( cbuf, 0xB8 + $dst$$reg ); // 0xB8+ rd   -- MOV r32  ,imm32
1623     emit_d32   ( cbuf, 0x0  );             //                         imm32==0x0
1624   %}
1625 
1626   enc_class cdq_enc %{
1627     // Full implementation of Java idiv and irem; checks for
1628     // special case as described in JVM spec., p.243 & p.271.
1629     //
1630     //         normal case                           special case
1631     //
    // input : eax: dividend                          min_int
    //         reg: divisor                           -1
    //
    // output: eax: quotient  (= eax idiv reg)        min_int
    //         edx: remainder (= eax irem reg)        0
    //
    //  Code sequence:
    //
    //  81 F8 00 00 00 80    cmp         eax,80000000h
    //  0F 85 0B 00 00 00    jne         normal_case
    //  33 D2                xor         edx,edx
    //  83 F9 FF             cmp         ecx,-1
    //  0F 84 03 00 00 00    je          done
    //                  normal_case:
    //  99                   cdq
    //  F7 F9                idiv        ecx
1648     //                  done:
1649     //
1650     emit_opcode(cbuf,0x81); emit_d8(cbuf,0xF8);
1651     emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00);
1652     emit_opcode(cbuf,0x00); emit_d8(cbuf,0x80);                     // cmp rax,80000000h
1653     emit_opcode(cbuf,0x0F); emit_d8(cbuf,0x85);
1654     emit_opcode(cbuf,0x0B); emit_d8(cbuf,0x00);
1655     emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00);                     // jne normal_case
1656     emit_opcode(cbuf,0x33); emit_d8(cbuf,0xD2);                     // xor rdx,edx
1657     emit_opcode(cbuf,0x83); emit_d8(cbuf,0xF9); emit_d8(cbuf,0xFF); // cmp rcx,0FFh
1658     emit_opcode(cbuf,0x0F); emit_d8(cbuf,0x84);
1659     emit_opcode(cbuf,0x03); emit_d8(cbuf,0x00);
1660     emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00);                     // je done
1661     // normal_case:
1662     emit_opcode(cbuf,0x99);                                         // cdq
1663     // idiv (note: must be emitted by the user of this rule)
1664     // normal:
1665   %}
1666 
1667   // Dense encoding for older common ops
1668   enc_class Opc_plus(immI opcode, rRegI reg) %{
1669     emit_opcode(cbuf, $opcode$$constant + $reg$$reg);
1670   %}
1671 
1672 
  // Opcode enc_class for 8/32 bit immediate instructions with sign-extension
1674   enc_class OpcSE (immI imm) %{ // Emit primary opcode and set sign-extend bit
1675     // Check for 8-bit immediate, and set sign extend bit in opcode
1676     if (($imm$$constant >= -128) && ($imm$$constant <= 127)) {
1677       emit_opcode(cbuf, $primary | 0x02);
1678     }
1679     else {                          // If 32-bit immediate
1680       emit_opcode(cbuf, $primary);
1681     }
1682   %}
1683 
1684   enc_class OpcSErm (rRegI dst, immI imm) %{    // OpcSEr/m
1685     // Emit primary opcode and set sign-extend bit
1686     // Check for 8-bit immediate, and set sign extend bit in opcode
1687     if (($imm$$constant >= -128) && ($imm$$constant <= 127)) {
      emit_opcode(cbuf, $primary | 0x02);
    }
1689     else {                          // If 32-bit immediate
1690       emit_opcode(cbuf, $primary);
1691     }
1692     // Emit r/m byte with secondary opcode, after primary opcode.
1693     emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
1694   %}
1695 
1696   enc_class Con8or32 (immI imm) %{    // Con8or32(storeImmI), 8 or 32 bits
1697     // Check for 8-bit immediate, and set sign extend bit in opcode
1698     if (($imm$$constant >= -128) && ($imm$$constant <= 127)) {
1699       $$$emit8$imm$$constant;
1700     }
1701     else {                          // If 32-bit immediate
1702       // Output immediate
1703       $$$emit32$imm$$constant;
1704     }
1705   %}
1706 
1707   enc_class Long_OpcSErm_Lo(eRegL dst, immL imm) %{
1708     // Emit primary opcode and set sign-extend bit
1709     // Check for 8-bit immediate, and set sign extend bit in opcode
1710     int con = (int)$imm$$constant; // Throw away top bits
1711     emit_opcode(cbuf, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
1712     // Emit r/m byte with secondary opcode, after primary opcode.
1713     emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
1714     if ((con >= -128) && (con <= 127)) emit_d8 (cbuf,con);
1715     else                               emit_d32(cbuf,con);
1716   %}
1717 
1718   enc_class Long_OpcSErm_Hi(eRegL dst, immL imm) %{
1719     // Emit primary opcode and set sign-extend bit
1720     // Check for 8-bit immediate, and set sign extend bit in opcode
1721     int con = (int)($imm$$constant >> 32); // Throw away bottom bits
1722     emit_opcode(cbuf, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
1723     // Emit r/m byte with tertiary opcode, after primary opcode.
1724     emit_rm(cbuf, 0x3, $tertiary, HIGH_FROM_LOW($dst$$reg));
1725     if ((con >= -128) && (con <= 127)) emit_d8 (cbuf,con);
1726     else                               emit_d32(cbuf,con);
1727   %}
1728 
1729   enc_class OpcSReg (rRegI dst) %{    // BSWAP
1730     emit_cc(cbuf, $secondary, $dst$$reg );
1731   %}
1732 
1733   enc_class bswap_long_bytes(eRegL dst) %{ // BSWAP
1734     int destlo = $dst$$reg;
1735     int desthi = HIGH_FROM_LOW(destlo);
1736     // bswap lo
1737     emit_opcode(cbuf, 0x0F);
1738     emit_cc(cbuf, 0xC8, destlo);
1739     // bswap hi
1740     emit_opcode(cbuf, 0x0F);
1741     emit_cc(cbuf, 0xC8, desthi);
1742     // xchg lo and hi
1743     emit_opcode(cbuf, 0x87);
1744     emit_rm(cbuf, 0x3, destlo, desthi);
1745   %}
1746 
1747   enc_class RegOpc (rRegI div) %{    // IDIV, IMOD, JMP indirect, ...
1748     emit_rm(cbuf, 0x3, $secondary, $div$$reg );
1749   %}
1750 
1751   enc_class enc_cmov(cmpOp cop ) %{ // CMOV
1752     $$$emit8$primary;
1753     emit_cc(cbuf, $secondary, $cop$$cmpcode);
1754   %}
1755 
1756   enc_class enc_cmov_dpr(cmpOp cop, regDPR src ) %{ // CMOV
1757     int op = 0xDA00 + $cop$$cmpcode + ($src$$reg-1);
1758     emit_d8(cbuf, op >> 8 );
1759     emit_d8(cbuf, op & 255);
1760   %}
1761 
1762   // emulate a CMOV with a conditional branch around a MOV
1763   enc_class enc_cmov_branch( cmpOp cop, immI brOffs ) %{ // CMOV
1764     // Invert sense of branch from sense of CMOV
1765     emit_cc( cbuf, 0x70, ($cop$$cmpcode^1) );
1766     emit_d8( cbuf, $brOffs$$constant );
1767   %}
1768 
1769   enc_class enc_PartialSubtypeCheck( ) %{
1770     Register Redi = as_Register(EDI_enc); // result register
1771     Register Reax = as_Register(EAX_enc); // super class
1772     Register Recx = as_Register(ECX_enc); // killed
1773     Register Resi = as_Register(ESI_enc); // sub class
1774     Label miss;
1775 
1776     MacroAssembler _masm(&cbuf);
1777     __ check_klass_subtype_slow_path(Resi, Reax, Recx, Redi,
1778                                      NULL, &miss,
1779                                      /*set_cond_codes:*/ true);
1780     if ($primary) {
1781       __ xorptr(Redi, Redi);
1782     }
1783     __ bind(miss);
1784   %}
1785 
1786   enc_class FFree_Float_Stack_All %{    // Free_Float_Stack_All
1787     MacroAssembler masm(&cbuf);
1788     int start = masm.offset();
1789     if (UseSSE >= 2) {
1790       if (VerifyFPU) {
1791         masm.verify_FPU(0, "must be empty in SSE2+ mode");
1792       }
1793     } else {
1794       // External c_calling_convention expects the FPU stack to be 'clean'.
1795       // Compiled code leaves it dirty.  Do cleanup now.
1796       masm.empty_FPU_stack();
1797     }
1798     if (sizeof_FFree_Float_Stack_All == -1) {
1799       sizeof_FFree_Float_Stack_All = masm.offset() - start;
1800     } else {
1801       assert(masm.offset() - start == sizeof_FFree_Float_Stack_All, "wrong size");
1802     }
1803   %}
1804 
1805   enc_class Verify_FPU_For_Leaf %{
1806     if( VerifyFPU ) {
1807       MacroAssembler masm(&cbuf);
1808       masm.verify_FPU( -3, "Returning from Runtime Leaf call");
1809     }
1810   %}
1811 
1812   enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime, Java_To_Runtime_Leaf
1813     // This is the instruction starting address for relocation info.
1814     cbuf.set_insts_mark();
1815     $$$emit8$primary;
1816     // CALL directly to the runtime
1817     emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1818                 runtime_call_Relocation::spec(), RELOC_IMM32 );
1819 
1820     if (UseSSE >= 2) {
1821       MacroAssembler _masm(&cbuf);
1822       BasicType rt = tf()->return_type();
1823 
1824       if ((rt == T_FLOAT || rt == T_DOUBLE) && !return_value_is_used()) {
1825         // A C runtime call where the return value is unused.  In SSE2+
1826         // mode the result needs to be removed from the FPU stack.  It's
1827         // likely that this function call could be removed by the
1828         // optimizer if the C function is a pure function.
1829         __ ffree(0);
1830       } else if (rt == T_FLOAT) {
1831         __ lea(rsp, Address(rsp, -4));
1832         __ fstp_s(Address(rsp, 0));
1833         __ movflt(xmm0, Address(rsp, 0));
1834         __ lea(rsp, Address(rsp,  4));
1835       } else if (rt == T_DOUBLE) {
1836         __ lea(rsp, Address(rsp, -8));
1837         __ fstp_d(Address(rsp, 0));
1838         __ movdbl(xmm0, Address(rsp, 0));
1839         __ lea(rsp, Address(rsp,  8));
1840       }
1841     }
1842   %}
1843 
1844 
1845   enc_class pre_call_resets %{
1846     // If method sets FPU control word restore it here
1847     debug_only(int off0 = cbuf.insts_size());
1848     if (ra_->C->in_24_bit_fp_mode()) {
1849       MacroAssembler _masm(&cbuf);
1850       __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
1851     }
1852     if (ra_->C->max_vector_size() > 16) {
1853       // Clear upper bits of YMM registers when current compiled code uses
1854       // wide vectors to avoid AVX <-> SSE transition penalty during call.
1855       MacroAssembler _masm(&cbuf);
1856       __ vzeroupper();
1857     }
1858     debug_only(int off1 = cbuf.insts_size());
1859     assert(off1 - off0 == pre_call_resets_size(), "correct size prediction");
1860   %}
1861 
1862   enc_class post_call_FPU %{
1863     // If method sets FPU control word do it here also
1864     if (Compile::current()->in_24_bit_fp_mode()) {
1865       MacroAssembler masm(&cbuf);
1866       masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
1867     }
1868   %}
1869 
1870   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
1871     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
1872     // who we intended to call.
1873     cbuf.set_insts_mark();
1874     $$$emit8$primary;
1875     if (!_method) {
1876       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1877                      runtime_call_Relocation::spec(), RELOC_IMM32 );
1878     } else if (_optimized_virtual) {
1879       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1880                      opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
1881     } else {
1882       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1883                      static_call_Relocation::spec(), RELOC_IMM32 );
1884     }
1885     if (_method) {  // Emit stub for static call.
1886       CompiledStaticCall::emit_to_interp_stub(cbuf);
1887     }
1888   %}
1889 
1890   enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
1891     MacroAssembler _masm(&cbuf);
1892     __ ic_call((address)$meth$$method);
1893   %}
1894 
1895   enc_class Java_Compiled_Call (method meth) %{    // JAVA COMPILED CALL
1896     int disp = in_bytes(Method::from_compiled_offset());
1897     assert( -128 <= disp && disp <= 127, "compiled_code_offset isn't small");
1898 
    // CALL *[EAX+in_bytes(Method::from_compiled_offset())]
1900     cbuf.set_insts_mark();
1901     $$$emit8$primary;
1902     emit_rm(cbuf, 0x01, $secondary, EAX_enc );  // R/M byte
1903     emit_d8(cbuf, disp);             // Displacement
1904 
1905   %}
1906 
1907 //   Following encoding is no longer used, but may be restored if calling
1908 //   convention changes significantly.
1909 //   Became: Xor_Reg(EBP), Java_To_Runtime( labl )
1910 //
1911 //   enc_class Java_Interpreter_Call (label labl) %{    // JAVA INTERPRETER CALL
1912 //     // int ic_reg     = Matcher::inline_cache_reg();
1913 //     // int ic_encode  = Matcher::_regEncode[ic_reg];
1914 //     // int imo_reg    = Matcher::interpreter_method_oop_reg();
1915 //     // int imo_encode = Matcher::_regEncode[imo_reg];
1916 //
1917 //     // // Interpreter expects method_oop in EBX, currently a callee-saved register,
1918 //     // // so we load it immediately before the call
1919 //     // emit_opcode(cbuf, 0x8B);                     // MOV    imo_reg,ic_reg  # method_oop
1920 //     // emit_rm(cbuf, 0x03, imo_encode, ic_encode ); // R/M byte
1921 //
1922 //     // xor rbp,ebp
1923 //     emit_opcode(cbuf, 0x33);
1924 //     emit_rm(cbuf, 0x3, EBP_enc, EBP_enc);
1925 //
1926 //     // CALL to interpreter.
1927 //     cbuf.set_insts_mark();
1928 //     $$$emit8$primary;
1929 //     emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf.insts_end()) - 4),
1930 //                 runtime_call_Relocation::spec(), RELOC_IMM32 );
1931 //   %}
1932 
1933   enc_class RegOpcImm (rRegI dst, immI8 shift) %{    // SHL, SAR, SHR
1934     $$$emit8$primary;
1935     emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
1936     $$$emit8$shift$$constant;
1937   %}
1938 
1939   enc_class LdImmI (rRegI dst, immI src) %{    // Load Immediate
1940     // Load immediate does not have a zero or sign extended version
1941     // for 8-bit immediates
1942     emit_opcode(cbuf, 0xB8 + $dst$$reg);
1943     $$$emit32$src$$constant;
1944   %}
1945 
1946   enc_class LdImmP (rRegI dst, immI src) %{    // Load Immediate
1947     // Load immediate does not have a zero or sign extended version
1948     // for 8-bit immediates
1949     emit_opcode(cbuf, $primary + $dst$$reg);
1950     $$$emit32$src$$constant;
1951   %}
1952 
1953   enc_class LdImmL_Lo( eRegL dst, immL src) %{    // Load Immediate
1954     // Load immediate does not have a zero or sign extended version
1955     // for 8-bit immediates
1956     int dst_enc = $dst$$reg;
1957     int src_con = $src$$constant & 0x0FFFFFFFFL;
1958     if (src_con == 0) {
1959       // xor dst, dst
1960       emit_opcode(cbuf, 0x33);
1961       emit_rm(cbuf, 0x3, dst_enc, dst_enc);
1962     } else {
1963       emit_opcode(cbuf, $primary + dst_enc);
1964       emit_d32(cbuf, src_con);
1965     }
1966   %}
1967 
1968   enc_class LdImmL_Hi( eRegL dst, immL src) %{    // Load Immediate
1969     // Load immediate does not have a zero or sign extended version
1970     // for 8-bit immediates
1971     int dst_enc = $dst$$reg + 2;
1972     int src_con = ((julong)($src$$constant)) >> 32;
1973     if (src_con == 0) {
1974       // xor dst, dst
1975       emit_opcode(cbuf, 0x33);
1976       emit_rm(cbuf, 0x3, dst_enc, dst_enc);
1977     } else {
1978       emit_opcode(cbuf, $primary + dst_enc);
1979       emit_d32(cbuf, src_con);
1980     }
1981   %}
1982 
1983 
1984   // Encode a reg-reg copy.  If it is useless, then empty encoding.
1985   enc_class enc_Copy( rRegI dst, rRegI src ) %{
1986     encode_Copy( cbuf, $dst$$reg, $src$$reg );
1987   %}
1988 
1989   enc_class enc_CopyL_Lo( rRegI dst, eRegL src ) %{
1990     encode_Copy( cbuf, $dst$$reg, $src$$reg );
1991   %}
1992 
1993   enc_class RegReg (rRegI dst, rRegI src) %{    // RegReg(Many)
1994     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
1995   %}
1996 
1997   enc_class RegReg_Lo(eRegL dst, eRegL src) %{    // RegReg(Many)
1998     $$$emit8$primary;
1999     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
2000   %}
2001 
2002   enc_class RegReg_Hi(eRegL dst, eRegL src) %{    // RegReg(Many)
2003     $$$emit8$secondary;
2004     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($src$$reg));
2005   %}
2006 
2007   enc_class RegReg_Lo2(eRegL dst, eRegL src) %{    // RegReg(Many)
2008     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
2009   %}
2010 
2011   enc_class RegReg_Hi2(eRegL dst, eRegL src) %{    // RegReg(Many)
2012     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($src$$reg));
2013   %}
2014 
2015   enc_class RegReg_HiLo( eRegL src, rRegI dst ) %{
2016     emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($src$$reg));
2017   %}
2018 
2019   enc_class Con32 (immI src) %{    // Con32(storeImmI)
2020     // Output immediate
2021     $$$emit32$src$$constant;
2022   %}
2023 
2024   enc_class Con32FPR_as_bits(immFPR src) %{        // storeF_imm
2025     // Output Float immediate bits
2026     jfloat jf = $src$$constant;
2027     int    jf_as_bits = jint_cast( jf );
2028     emit_d32(cbuf, jf_as_bits);
2029   %}
2030 
2031   enc_class Con32F_as_bits(immF src) %{      // storeX_imm
2032     // Output Float immediate bits
2033     jfloat jf = $src$$constant;
2034     int    jf_as_bits = jint_cast( jf );
2035     emit_d32(cbuf, jf_as_bits);
2036   %}
2037 
2038   enc_class Con16 (immI src) %{    // Con16(storeImmI)
2039     // Output immediate
2040     $$$emit16$src$$constant;
2041   %}
2042 
2043   enc_class Con_d32(immI src) %{
2044     emit_d32(cbuf,$src$$constant);
2045   %}
2046 
2047   enc_class conmemref (eRegP t1) %{    // Con32(storeImmI)
2048     // Output immediate memory reference
2049     emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
2050     emit_d32(cbuf, 0x00);
2051   %}
2052 
2053   enc_class lock_prefix( ) %{
2054     if( os::is_MP() )
2055       emit_opcode(cbuf,0xF0);         // [Lock]
2056   %}
2057 
2058   // Cmp-xchg long value.
  // Note: we need to swap rbx and rcx before and after the
  //       cmpxchg8 instruction because the instruction uses
  //       rcx as the high order word of the new value to store but
  //       our register encoding uses rbx.
2063   enc_class enc_cmpxchg8(eSIRegP mem_ptr) %{
2064 
2065     // XCHG  rbx,ecx
2066     emit_opcode(cbuf,0x87);
2067     emit_opcode(cbuf,0xD9);
2068     // [Lock]
2069     if( os::is_MP() )
2070       emit_opcode(cbuf,0xF0);
2071     // CMPXCHG8 [Eptr]
2072     emit_opcode(cbuf,0x0F);
2073     emit_opcode(cbuf,0xC7);
2074     emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
2075     // XCHG  rbx,ecx
2076     emit_opcode(cbuf,0x87);
2077     emit_opcode(cbuf,0xD9);
2078   %}
2079 
2080   enc_class enc_cmpxchg(eSIRegP mem_ptr) %{
2081     // [Lock]
2082     if( os::is_MP() )
2083       emit_opcode(cbuf,0xF0);
2084 
2085     // CMPXCHG [Eptr]
2086     emit_opcode(cbuf,0x0F);
2087     emit_opcode(cbuf,0xB1);
2088     emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
2089   %}
2090 
2091   enc_class enc_flags_ne_to_boolean( iRegI res ) %{
2092     int res_encoding = $res$$reg;
2093 
2094     // MOV  res,0
2095     emit_opcode( cbuf, 0xB8 + res_encoding);
2096     emit_d32( cbuf, 0 );
2097     // JNE,s  fail
2098     emit_opcode(cbuf,0x75);
2099     emit_d8(cbuf, 5 );
2100     // MOV  res,1
2101     emit_opcode( cbuf, 0xB8 + res_encoding);
2102     emit_d32( cbuf, 1 );
2103     // fail:
2104   %}
2105 
2106   enc_class set_instruction_start( ) %{
2107     cbuf.set_insts_mark();            // Mark start of opcode for reloc info in mem operand
2108   %}
2109 
2110   enc_class RegMem (rRegI ereg, memory mem) %{    // emit_reg_mem
2111     int reg_encoding = $ereg$$reg;
2112     int base  = $mem$$base;
2113     int index = $mem$$index;
2114     int scale = $mem$$scale;
2115     int displace = $mem$$disp;
2116     relocInfo::relocType disp_reloc = $mem->disp_reloc();
2117     encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
2118   %}
2119 
2120   enc_class RegMem_Hi(eRegL ereg, memory mem) %{    // emit_reg_mem
2121     int reg_encoding = HIGH_FROM_LOW($ereg$$reg);  // Hi register of pair, computed from lo
2122     int base  = $mem$$base;
2123     int index = $mem$$index;
2124     int scale = $mem$$scale;
2125     int displace = $mem$$disp + 4;      // Offset is 4 further in memory
2126     assert( $mem->disp_reloc() == relocInfo::none, "Cannot add 4 to oop" );
2127     encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, relocInfo::none);
2128   %}
2129 
2130   enc_class move_long_small_shift( eRegL dst, immI_1_31 cnt ) %{
2131     int r1, r2;
2132     if( $tertiary == 0xA4 ) { r1 = $dst$$reg;  r2 = HIGH_FROM_LOW($dst$$reg); }
2133     else                    { r2 = $dst$$reg;  r1 = HIGH_FROM_LOW($dst$$reg); }
2134     emit_opcode(cbuf,0x0F);
2135     emit_opcode(cbuf,$tertiary);
2136     emit_rm(cbuf, 0x3, r1, r2);
2137     emit_d8(cbuf,$cnt$$constant);
2138     emit_d8(cbuf,$primary);
2139     emit_rm(cbuf, 0x3, $secondary, r1);
2140     emit_d8(cbuf,$cnt$$constant);
2141   %}
2142 
2143   enc_class move_long_big_shift_sign( eRegL dst, immI_32_63 cnt ) %{
2144     emit_opcode( cbuf, 0x8B ); // Move
2145     emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg));
2146     if( $cnt$$constant > 32 ) { // Shift, if not by zero
2147       emit_d8(cbuf,$primary);
2148       emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
2149       emit_d8(cbuf,$cnt$$constant-32);
2150     }
2151     emit_d8(cbuf,$primary);
2152     emit_rm(cbuf, 0x3, $secondary, HIGH_FROM_LOW($dst$$reg));
2153     emit_d8(cbuf,31);
2154   %}
2155 
2156   enc_class move_long_big_shift_clr( eRegL dst, immI_32_63 cnt ) %{
2157     int r1, r2;
2158     if( $secondary == 0x5 ) { r1 = $dst$$reg;  r2 = HIGH_FROM_LOW($dst$$reg); }
2159     else                    { r2 = $dst$$reg;  r1 = HIGH_FROM_LOW($dst$$reg); }
2160 
2161     emit_opcode( cbuf, 0x8B ); // Move r1,r2
2162     emit_rm(cbuf, 0x3, r1, r2);
2163     if( $cnt$$constant > 32 ) { // Shift, if not by zero
2164       emit_opcode(cbuf,$primary);
2165       emit_rm(cbuf, 0x3, $secondary, r1);
2166       emit_d8(cbuf,$cnt$$constant-32);
2167     }
2168     emit_opcode(cbuf,0x33);  // XOR r2,r2
2169     emit_rm(cbuf, 0x3, r2, r2);
2170   %}
2171 
2172   // Clone of RegMem but accepts an extra parameter to access each
2173   // half of a double in memory; it never needs relocation info.
2174   enc_class Mov_MemD_half_to_Reg (immI opcode, memory mem, immI disp_for_half, rRegI rm_reg) %{
2175     emit_opcode(cbuf,$opcode$$constant);
2176     int reg_encoding = $rm_reg$$reg;
2177     int base     = $mem$$base;
2178     int index    = $mem$$index;
2179     int scale    = $mem$$scale;
2180     int displace = $mem$$disp + $disp_for_half$$constant;
2181     relocInfo::relocType disp_reloc = relocInfo::none;
2182     encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
2183   %}
2184 
2185   // !!!!! Special Custom Code used by MemMove, and stack access instructions !!!!!
2186   //
2187   // Clone of RegMem except the RM-byte's reg/opcode field is an ADLC-time constant
2188   // and it never needs relocation information.
2189   // Frequently used to move data between FPU's Stack Top and memory.
2190   enc_class RMopc_Mem_no_oop (immI rm_opcode, memory mem) %{
2191     int rm_byte_opcode = $rm_opcode$$constant;
2192     int base     = $mem$$base;
2193     int index    = $mem$$index;
2194     int scale    = $mem$$scale;
2195     int displace = $mem$$disp;
2196     assert( $mem->disp_reloc() == relocInfo::none, "No oops here because no reloc info allowed" );
2197     encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, relocInfo::none);
2198   %}
2199 
2200   enc_class RMopc_Mem (immI rm_opcode, memory mem) %{
2201     int rm_byte_opcode = $rm_opcode$$constant;
2202     int base     = $mem$$base;
2203     int index    = $mem$$index;
2204     int scale    = $mem$$scale;
2205     int displace = $mem$$disp;
2206     relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
2207     encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
2208   %}
2209 
2210   enc_class RegLea (rRegI dst, rRegI src0, immI src1 ) %{    // emit_reg_lea
2211     int reg_encoding = $dst$$reg;
2212     int base         = $src0$$reg;      // 0xFFFFFFFF indicates no base
2213     int index        = 0x04;            // 0x04 indicates no index
2214     int scale        = 0x00;            // 0x00 indicates no scale
2215     int displace     = $src1$$constant; // 0x00 indicates no displacement
2216     relocInfo::relocType disp_reloc = relocInfo::none;
2217     encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
2218   %}
2219 
2220   enc_class min_enc (rRegI dst, rRegI src) %{    // MIN
2221     // Compare dst,src
2222     emit_opcode(cbuf,0x3B);
2223     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
2224     // jmp dst < src around move
2225     emit_opcode(cbuf,0x7C);
2226     emit_d8(cbuf,2);
2227     // move dst,src
2228     emit_opcode(cbuf,0x8B);
2229     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
2230   %}
2231 
2232   enc_class max_enc (rRegI dst, rRegI src) %{    // MAX
2233     // Compare dst,src
2234     emit_opcode(cbuf,0x3B);
2235     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
2236     // jmp dst > src around move
2237     emit_opcode(cbuf,0x7F);
2238     emit_d8(cbuf,2);
2239     // move dst,src
2240     emit_opcode(cbuf,0x8B);
2241     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
2242   %}
2243 
2244   enc_class enc_FPR_store(memory mem, regDPR src) %{
2245     // If src is FPR1, we can just FST to store it.
2246     // Else we need to FLD it to FPR1, then FSTP to store/pop it.
2247     int reg_encoding = 0x2; // Just store
2248     int base  = $mem$$base;
2249     int index = $mem$$index;
2250     int scale = $mem$$scale;
2251     int displace = $mem$$disp;
2252     relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
2253     if( $src$$reg != FPR1L_enc ) {
2254       reg_encoding = 0x3;  // Store & pop
2255       emit_opcode( cbuf, 0xD9 ); // FLD (i.e., push it)
2256       emit_d8( cbuf, 0xC0-1+$src$$reg );
2257     }
2258     cbuf.set_insts_mark();       // Mark start of opcode for reloc info in mem operand
2259     emit_opcode(cbuf,$primary);
2260     encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
2261   %}
2262 
2263   enc_class neg_reg(rRegI dst) %{
2264     // NEG $dst
2265     emit_opcode(cbuf,0xF7);
2266     emit_rm(cbuf, 0x3, 0x03, $dst$$reg );
2267   %}
2268 
2269   enc_class setLT_reg(eCXRegI dst) %{
2270     // SETLT $dst
2271     emit_opcode(cbuf,0x0F);
2272     emit_opcode(cbuf,0x9C);
2273     emit_rm( cbuf, 0x3, 0x4, $dst$$reg );
2274   %}
2275 
2276   enc_class enc_cmpLTP(ncxRegI p, ncxRegI q, ncxRegI y, eCXRegI tmp) %{    // cadd_cmpLT
2277     int tmpReg = $tmp$$reg;
2278 
2279     // SUB $p,$q
2280     emit_opcode(cbuf,0x2B);
2281     emit_rm(cbuf, 0x3, $p$$reg, $q$$reg);
2282     // SBB $tmp,$tmp
2283     emit_opcode(cbuf,0x1B);
2284     emit_rm(cbuf, 0x3, tmpReg, tmpReg);
2285     // AND $tmp,$y
2286     emit_opcode(cbuf,0x23);
2287     emit_rm(cbuf, 0x3, tmpReg, $y$$reg);
2288     // ADD $p,$tmp
2289     emit_opcode(cbuf,0x03);
2290     emit_rm(cbuf, 0x3, $p$$reg, tmpReg);
2291   %}
2292 
2293   enc_class shift_left_long( eRegL dst, eCXRegI shift ) %{
2294     // TEST shift,32
2295     emit_opcode(cbuf,0xF7);
2296     emit_rm(cbuf, 0x3, 0, ECX_enc);
2297     emit_d32(cbuf,0x20);
2298     // JEQ,s small
2299     emit_opcode(cbuf, 0x74);
2300     emit_d8(cbuf, 0x04);
2301     // MOV    $dst.hi,$dst.lo
2302     emit_opcode( cbuf, 0x8B );
2303     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg );
2304     // CLR    $dst.lo
2305     emit_opcode(cbuf, 0x33);
2306     emit_rm(cbuf, 0x3, $dst$$reg, $dst$$reg);
2307 // small:
2308     // SHLD   $dst.hi,$dst.lo,$shift
2309     emit_opcode(cbuf,0x0F);
2310     emit_opcode(cbuf,0xA5);
2311     emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg));
    // SHL    $dst.lo,$shift
2313     emit_opcode(cbuf,0xD3);
2314     emit_rm(cbuf, 0x3, 0x4, $dst$$reg );
2315   %}
2316 
2317   enc_class shift_right_long( eRegL dst, eCXRegI shift ) %{
2318     // TEST shift,32
2319     emit_opcode(cbuf,0xF7);
2320     emit_rm(cbuf, 0x3, 0, ECX_enc);
2321     emit_d32(cbuf,0x20);
2322     // JEQ,s small
2323     emit_opcode(cbuf, 0x74);
2324     emit_d8(cbuf, 0x04);
2325     // MOV    $dst.lo,$dst.hi
2326     emit_opcode( cbuf, 0x8B );
2327     emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg) );
2328     // CLR    $dst.hi
2329     emit_opcode(cbuf, 0x33);
2330     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($dst$$reg));
2331 // small:
2332     // SHRD   $dst.lo,$dst.hi,$shift
2333     emit_opcode(cbuf,0x0F);
2334     emit_opcode(cbuf,0xAD);
2335     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg);
    // SHR    $dst.hi,$shift
2337     emit_opcode(cbuf,0xD3);
2338     emit_rm(cbuf, 0x3, 0x5, HIGH_FROM_LOW($dst$$reg) );
2339   %}
2340 
2341   enc_class shift_right_arith_long( eRegL dst, eCXRegI shift ) %{
2342     // TEST shift,32
2343     emit_opcode(cbuf,0xF7);
2344     emit_rm(cbuf, 0x3, 0, ECX_enc);
2345     emit_d32(cbuf,0x20);
2346     // JEQ,s small
2347     emit_opcode(cbuf, 0x74);
2348     emit_d8(cbuf, 0x05);
2349     // MOV    $dst.lo,$dst.hi
2350     emit_opcode( cbuf, 0x8B );
2351     emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg) );
2352     // SAR    $dst.hi,31
2353     emit_opcode(cbuf, 0xC1);
2354     emit_rm(cbuf, 0x3, 7, HIGH_FROM_LOW($dst$$reg) );
2355     emit_d8(cbuf, 0x1F );
2356 // small:
2357     // SHRD   $dst.lo,$dst.hi,$shift
2358     emit_opcode(cbuf,0x0F);
2359     emit_opcode(cbuf,0xAD);
2360     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg);
    // SAR    $dst.hi,$shift
2362     emit_opcode(cbuf,0xD3);
2363     emit_rm(cbuf, 0x3, 0x7, HIGH_FROM_LOW($dst$$reg) );
2364   %}
2365 
2366 
2367   // ----------------- Encodings for floating point unit -----------------
2368   // May leave result in FPU-TOS or FPU reg depending on opcodes
2369   enc_class OpcReg_FPR(regFPR src) %{    // FMUL, FDIV
2370     $$$emit8$primary;
2371     emit_rm(cbuf, 0x3, $secondary, $src$$reg );
2372   %}
2373 
2374   // Pop argument in FPR0 with FSTP ST(0)
2375   enc_class PopFPU() %{
2376     emit_opcode( cbuf, 0xDD );
2377     emit_d8( cbuf, 0xD8 );
2378   %}
2379 
2380   // !!!!! equivalent to Pop_Reg_F
2381   enc_class Pop_Reg_DPR( regDPR dst ) %{
2382     emit_opcode( cbuf, 0xDD );           // FSTP   ST(i)
2383     emit_d8( cbuf, 0xD8+$dst$$reg );
2384   %}
2385 
2386   enc_class Push_Reg_DPR( regDPR dst ) %{
2387     emit_opcode( cbuf, 0xD9 );
2388     emit_d8( cbuf, 0xC0-1+$dst$$reg );   // FLD ST(i-1)
2389   %}
2390 
2391   enc_class strictfp_bias1( regDPR dst ) %{
2392     emit_opcode( cbuf, 0xDB );           // FLD m80real
2393     emit_opcode( cbuf, 0x2D );
2394     emit_d32( cbuf, (int)StubRoutines::addr_fpu_subnormal_bias1() );
2395     emit_opcode( cbuf, 0xDE );           // FMULP ST(dst), ST0
2396     emit_opcode( cbuf, 0xC8+$dst$$reg );
2397   %}
2398 
2399   enc_class strictfp_bias2( regDPR dst ) %{
2400     emit_opcode( cbuf, 0xDB );           // FLD m80real
2401     emit_opcode( cbuf, 0x2D );
2402     emit_d32( cbuf, (int)StubRoutines::addr_fpu_subnormal_bias2() );
2403     emit_opcode( cbuf, 0xDE );           // FMULP ST(dst), ST0
2404     emit_opcode( cbuf, 0xC8+$dst$$reg );
2405   %}
2406 
2407   // Special case for moving an integer register to a stack slot.
2408   enc_class OpcPRegSS( stackSlotI dst, rRegI src ) %{ // RegSS
2409     store_to_stackslot( cbuf, $primary, $src$$reg, $dst$$disp );
2410   %}
2411 
2412   // Special case for moving a register to a stack slot.
2413   enc_class RegSS( stackSlotI dst, rRegI src ) %{ // RegSS
2414     // Opcode already emitted
2415     emit_rm( cbuf, 0x02, $src$$reg, ESP_enc );   // R/M byte
2416     emit_rm( cbuf, 0x00, ESP_enc, ESP_enc);          // SIB byte
2417     emit_d32(cbuf, $dst$$disp);   // Displacement
2418   %}
2419 
2420   // Push the integer in stackSlot 'src' onto FP-stack
2421   enc_class Push_Mem_I( memory src ) %{    // FILD   [ESP+src]
2422     store_to_stackslot( cbuf, $primary, $secondary, $src$$disp );
2423   %}
2424 
2425   // Push FPU's TOS float to a stack-slot, and pop FPU-stack
2426   enc_class Pop_Mem_FPR( stackSlotF dst ) %{ // FSTP_S [ESP+dst]
2427     store_to_stackslot( cbuf, 0xD9, 0x03, $dst$$disp );
2428   %}
2429 
2430   // Same as Pop_Mem_F except for opcode
2431   // Push FPU's TOS double to a stack-slot, and pop FPU-stack
2432   enc_class Pop_Mem_DPR( stackSlotD dst ) %{ // FSTP_D [ESP+dst]
2433     store_to_stackslot( cbuf, 0xDD, 0x03, $dst$$disp );
2434   %}
2435 
2436   enc_class Pop_Reg_FPR( regFPR dst ) %{
2437     emit_opcode( cbuf, 0xDD );           // FSTP   ST(i)
2438     emit_d8( cbuf, 0xD8+$dst$$reg );
2439   %}
2440 
2441   enc_class Push_Reg_FPR( regFPR dst ) %{
2442     emit_opcode( cbuf, 0xD9 );           // FLD    ST(i-1)
2443     emit_d8( cbuf, 0xC0-1+$dst$$reg );
2444   %}
2445 
2446   // Push FPU's float to a stack-slot, and pop FPU-stack
2447   enc_class Pop_Mem_Reg_FPR( stackSlotF dst, regFPR src ) %{
2448     int pop = 0x02;
2449     if ($src$$reg != FPR1L_enc) {
2450       emit_opcode( cbuf, 0xD9 );         // FLD    ST(i-1)
2451       emit_d8( cbuf, 0xC0-1+$src$$reg );
2452       pop = 0x03;
2453     }
2454     store_to_stackslot( cbuf, 0xD9, pop, $dst$$disp ); // FST<P>_S  [ESP+dst]
2455   %}
2456 
2457   // Push FPU's double to a stack-slot, and pop FPU-stack
2458   enc_class Pop_Mem_Reg_DPR( stackSlotD dst, regDPR src ) %{
2459     int pop = 0x02;
2460     if ($src$$reg != FPR1L_enc) {
2461       emit_opcode( cbuf, 0xD9 );         // FLD    ST(i-1)
2462       emit_d8( cbuf, 0xC0-1+$src$$reg );
2463       pop = 0x03;
2464     }
2465     store_to_stackslot( cbuf, 0xDD, pop, $dst$$disp ); // FST<P>_D  [ESP+dst]
2466   %}
2467 
2468   // Push FPU's double to a FPU-stack-slot, and pop FPU-stack
2469   enc_class Pop_Reg_Reg_DPR( regDPR dst, regFPR src ) %{
2470     int pop = 0xD0 - 1; // -1 since we skip FLD
2471     if ($src$$reg != FPR1L_enc) {
2472       emit_opcode( cbuf, 0xD9 );         // FLD    ST(src-1)
2473       emit_d8( cbuf, 0xC0-1+$src$$reg );
2474       pop = 0xD8;
2475     }
2476     emit_opcode( cbuf, 0xDD );
2477     emit_d8( cbuf, pop+$dst$$reg );      // FST<P> ST(i)
2478   %}
2479 
2480 
2481   enc_class Push_Reg_Mod_DPR( regDPR dst, regDPR src) %{
2482     // load dst in FPR0
2483     emit_opcode( cbuf, 0xD9 );
2484     emit_d8( cbuf, 0xC0-1+$dst$$reg );
2485     if ($src$$reg != FPR1L_enc) {
2486       // fincstp
2487       emit_opcode (cbuf, 0xD9);
2488       emit_opcode (cbuf, 0xF7);
2489       // swap src with FPR1:
2490       // FXCH FPR1 with src
2491       emit_opcode(cbuf, 0xD9);
2492       emit_d8(cbuf, 0xC8-1+$src$$reg );
2493       // fdecstp
2494       emit_opcode (cbuf, 0xD9);
2495       emit_opcode (cbuf, 0xF6);
2496     }
2497   %}
2498 
2499   enc_class Push_ModD_encoding(regD src0, regD src1) %{
2500     MacroAssembler _masm(&cbuf);
2501     __ subptr(rsp, 8);
2502     __ movdbl(Address(rsp, 0), $src1$$XMMRegister);
2503     __ fld_d(Address(rsp, 0));
2504     __ movdbl(Address(rsp, 0), $src0$$XMMRegister);
2505     __ fld_d(Address(rsp, 0));
2506   %}
2507 
2508   enc_class Push_ModF_encoding(regF src0, regF src1) %{
2509     MacroAssembler _masm(&cbuf);
2510     __ subptr(rsp, 4);
2511     __ movflt(Address(rsp, 0), $src1$$XMMRegister);
2512     __ fld_s(Address(rsp, 0));
2513     __ movflt(Address(rsp, 0), $src0$$XMMRegister);
2514     __ fld_s(Address(rsp, 0));
2515   %}
2516 
2517   enc_class Push_ResultD(regD dst) %{
2518     MacroAssembler _masm(&cbuf);
2519     __ fstp_d(Address(rsp, 0));
2520     __ movdbl($dst$$XMMRegister, Address(rsp, 0));
2521     __ addptr(rsp, 8);
2522   %}
2523 
2524   enc_class Push_ResultF(regF dst, immI d8) %{
2525     MacroAssembler _masm(&cbuf);
2526     __ fstp_s(Address(rsp, 0));
2527     __ movflt($dst$$XMMRegister, Address(rsp, 0));
2528     __ addptr(rsp, $d8$$constant);
2529   %}
2530 
2531   enc_class Push_SrcD(regD src) %{
2532     MacroAssembler _masm(&cbuf);
2533     __ subptr(rsp, 8);
2534     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
2535     __ fld_d(Address(rsp, 0));
2536   %}
2537 
2538   enc_class push_stack_temp_qword() %{
2539     MacroAssembler _masm(&cbuf);
2540     __ subptr(rsp, 8);
2541   %}
2542 
2543   enc_class pop_stack_temp_qword() %{
2544     MacroAssembler _masm(&cbuf);
2545     __ addptr(rsp, 8);
2546   %}
2547 
2548   enc_class push_xmm_to_fpr1(regD src) %{
2549     MacroAssembler _masm(&cbuf);
2550     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
2551     __ fld_d(Address(rsp, 0));
2552   %}
2553 
2554   enc_class Push_Result_Mod_DPR( regDPR src) %{
2555     if ($src$$reg != FPR1L_enc) {
2556       // fincstp
2557       emit_opcode (cbuf, 0xD9);
2558       emit_opcode (cbuf, 0xF7);
2559       // FXCH FPR1 with src
2560       emit_opcode(cbuf, 0xD9);
2561       emit_d8(cbuf, 0xC8-1+$src$$reg );
2562       // fdecstp
2563       emit_opcode (cbuf, 0xD9);
2564       emit_opcode (cbuf, 0xF6);
2565     }
2566     // // following asm replaced with Pop_Reg_F or Pop_Mem_F
2567     // // FSTP   FPR$dst$$reg
2568     // emit_opcode( cbuf, 0xDD );
2569     // emit_d8( cbuf, 0xD8+$dst$$reg );
2570   %}
2571 
2572   enc_class fnstsw_sahf_skip_parity() %{
2573     // fnstsw ax
2574     emit_opcode( cbuf, 0xDF );
2575     emit_opcode( cbuf, 0xE0 );
2576     // sahf
2577     emit_opcode( cbuf, 0x9E );
2578     // jnp  ::skip
2579     emit_opcode( cbuf, 0x7B );
2580     emit_opcode( cbuf, 0x05 );
2581   %}
2582 
2583   enc_class emitModDPR() %{
2584     // fprem must be iterative
2585     // :: loop
2586     // fprem
2587     emit_opcode( cbuf, 0xD9 );
2588     emit_opcode( cbuf, 0xF8 );
2589     // wait
2590     emit_opcode( cbuf, 0x9b );
2591     // fnstsw ax
2592     emit_opcode( cbuf, 0xDF );
2593     emit_opcode( cbuf, 0xE0 );
2594     // sahf
2595     emit_opcode( cbuf, 0x9E );
2596     // jp  ::loop
2597     emit_opcode( cbuf, 0x0F );
2598     emit_opcode( cbuf, 0x8A );
2599     emit_opcode( cbuf, 0xF4 );
2600     emit_opcode( cbuf, 0xFF );
2601     emit_opcode( cbuf, 0xFF );
2602     emit_opcode( cbuf, 0xFF );
2603   %}
2604 
2605   enc_class fpu_flags() %{
2606     // fnstsw_ax
2607     emit_opcode( cbuf, 0xDF);
2608     emit_opcode( cbuf, 0xE0);
2609     // test ax,0x0400
2610     emit_opcode( cbuf, 0x66 );   // operand-size prefix for 16-bit immediate
2611     emit_opcode( cbuf, 0xA9 );
2612     emit_d16   ( cbuf, 0x0400 );
2613     // // // This sequence works, but stalls for 12-16 cycles on PPro
2614     // // test rax,0x0400
2615     // emit_opcode( cbuf, 0xA9 );
2616     // emit_d32   ( cbuf, 0x00000400 );
2617     //
2618     // jz exit (no unordered comparison)
2619     emit_opcode( cbuf, 0x74 );
2620     emit_d8    ( cbuf, 0x02 );
2621     // mov ah,1 - treat as LT case (set carry flag)
2622     emit_opcode( cbuf, 0xB4 );
2623     emit_d8    ( cbuf, 0x01 );
2624     // sahf
2625     emit_opcode( cbuf, 0x9E);
2626   %}
2627 
2628   enc_class cmpF_P6_fixup() %{
2629     // Fixup the integer flags in case comparison involved a NaN
2630     //
2631     // JNP exit (no unordered comparison, P-flag is set by NaN)
2632     emit_opcode( cbuf, 0x7B );
2633     emit_d8    ( cbuf, 0x03 );
2634     // MOV AH,1 - treat as LT case (set carry flag)
2635     emit_opcode( cbuf, 0xB4 );
2636     emit_d8    ( cbuf, 0x01 );
2637     // SAHF
2638     emit_opcode( cbuf, 0x9E);
2639     // NOP     // target for branch to avoid branch to branch
2640     emit_opcode( cbuf, 0x90);
2641   %}
2642 
2643 //     fnstsw_ax();
2644 //     sahf();
2645 //     movl(dst, nan_result);
2646 //     jcc(Assembler::parity, exit);
2647 //     movl(dst, less_result);
2648 //     jcc(Assembler::below, exit);
2649 //     movl(dst, equal_result);
2650 //     jcc(Assembler::equal, exit);
2651 //     movl(dst, greater_result);
2652 
2653 // less_result     =  1;
2654 // greater_result  = -1;
2655 // equal_result    = 0;
2656 // nan_result      = -1;
2657 
2658   enc_class CmpF_Result(rRegI dst) %{
2659     // fnstsw_ax();
2660     emit_opcode( cbuf, 0xDF);
2661     emit_opcode( cbuf, 0xE0);
2662     // sahf
2663     emit_opcode( cbuf, 0x9E);
2664     // movl(dst, nan_result);
2665     emit_opcode( cbuf, 0xB8 + $dst$$reg);
2666     emit_d32( cbuf, -1 );
2667     // jcc(Assembler::parity, exit);
2668     emit_opcode( cbuf, 0x7A );
2669     emit_d8    ( cbuf, 0x13 );
2670     // movl(dst, less_result);
2671     emit_opcode( cbuf, 0xB8 + $dst$$reg);
2672     emit_d32( cbuf, -1 );
2673     // jcc(Assembler::below, exit);
2674     emit_opcode( cbuf, 0x72 );
2675     emit_d8    ( cbuf, 0x0C );
2676     // movl(dst, equal_result);
2677     emit_opcode( cbuf, 0xB8 + $dst$$reg);
2678     emit_d32( cbuf, 0 );
2679     // jcc(Assembler::equal, exit);
2680     emit_opcode( cbuf, 0x74 );
2681     emit_d8    ( cbuf, 0x05 );
2682     // movl(dst, greater_result);
2683     emit_opcode( cbuf, 0xB8 + $dst$$reg);
2684     emit_d32( cbuf, 1 );
2685   %}
2686 
2687 
2688   // Compare the longs and set flags
2689   // BROKEN!  Do Not use as-is
2690   enc_class cmpl_test( eRegL src1, eRegL src2 ) %{
2691     // CMP    $src1.hi,$src2.hi
2692     emit_opcode( cbuf, 0x3B );
2693     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($src1$$reg), HIGH_FROM_LOW($src2$$reg) );
2694     // JNE,s  done
2695     emit_opcode(cbuf,0x75);
2696     emit_d8(cbuf, 2 );
2697     // CMP    $src1.lo,$src2.lo
2698     emit_opcode( cbuf, 0x3B );
2699     emit_rm(cbuf, 0x3, $src1$$reg, $src2$$reg );
2700 // done:
2701   %}
2702 
2703   enc_class convert_int_long( regL dst, rRegI src ) %{
2704     // mov $dst.lo,$src
2705     int dst_encoding = $dst$$reg;
2706     int src_encoding = $src$$reg;
2707     encode_Copy( cbuf, dst_encoding  , src_encoding );
2708     // mov $dst.hi,$src
2709     encode_Copy( cbuf, HIGH_FROM_LOW(dst_encoding), src_encoding );
2710     // sar $dst.hi,31
2711     emit_opcode( cbuf, 0xC1 );
2712     emit_rm(cbuf, 0x3, 7, HIGH_FROM_LOW(dst_encoding) );
2713     emit_d8(cbuf, 0x1F );
2714   %}
2715 
2716   enc_class convert_long_double( eRegL src ) %{
2717     // push $src.hi
2718     emit_opcode(cbuf, 0x50+HIGH_FROM_LOW($src$$reg));
2719     // push $src.lo
2720     emit_opcode(cbuf, 0x50+$src$$reg  );
2721     // fild 64-bits at [SP]
2722     emit_opcode(cbuf,0xdf);
2723     emit_d8(cbuf, 0x6C);
2724     emit_d8(cbuf, 0x24);
2725     emit_d8(cbuf, 0x00);
2726     // pop stack
2727     emit_opcode(cbuf, 0x83); // add  SP, #8
2728     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
2729     emit_d8(cbuf, 0x8);
2730   %}
2731 
2732   enc_class multiply_con_and_shift_high( eDXRegI dst, nadxRegI src1, eADXRegL_low_only src2, immI_32_63 cnt, eFlagsReg cr ) %{
2733     // IMUL   EDX:EAX,$src1
2734     emit_opcode( cbuf, 0xF7 );
2735     emit_rm( cbuf, 0x3, 0x5, $src1$$reg );
2736     // SAR    EDX,$cnt-32
2737     int shift_count = ((int)$cnt$$constant) - 32;
2738     if (shift_count > 0) {
2739       emit_opcode(cbuf, 0xC1);
2740       emit_rm(cbuf, 0x3, 7, $dst$$reg );
2741       emit_d8(cbuf, shift_count);
2742     }
2743   %}
2744 
  // Same as convert_long_double, but without the trailing "add esp, 8" stack cleanup
2746   enc_class convert_long_double2( eRegL src ) %{
2747     // push $src.hi
2748     emit_opcode(cbuf, 0x50+HIGH_FROM_LOW($src$$reg));
2749     // push $src.lo
2750     emit_opcode(cbuf, 0x50+$src$$reg  );
2751     // fild 64-bits at [SP]
2752     emit_opcode(cbuf,0xdf);
2753     emit_d8(cbuf, 0x6C);
2754     emit_d8(cbuf, 0x24);
2755     emit_d8(cbuf, 0x00);
2756   %}
2757 
2758   enc_class long_int_multiply( eADXRegL dst, nadxRegI src) %{
2759     // Basic idea: long = (long)int * (long)int
2760     // IMUL EDX:EAX, src
2761     emit_opcode( cbuf, 0xF7 );
2762     emit_rm( cbuf, 0x3, 0x5, $src$$reg);
2763   %}
2764 
2765   enc_class long_uint_multiply( eADXRegL dst, nadxRegI src) %{
2766     // Basic Idea:  long = (int & 0xffffffffL) * (int & 0xffffffffL)
2767     // MUL EDX:EAX, src
2768     emit_opcode( cbuf, 0xF7 );
2769     emit_rm( cbuf, 0x3, 0x4, $src$$reg);
2770   %}
2771 
2772   enc_class long_multiply( eADXRegL dst, eRegL src, rRegI tmp ) %{
2773     // Basic idea: lo(result) = lo(x_lo * y_lo)
2774     //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
2775     // MOV    $tmp,$src.lo
2776     encode_Copy( cbuf, $tmp$$reg, $src$$reg );
2777     // IMUL   $tmp,EDX
2778     emit_opcode( cbuf, 0x0F );
2779     emit_opcode( cbuf, 0xAF );
2780     emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($dst$$reg) );
2781     // MOV    EDX,$src.hi
2782     encode_Copy( cbuf, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($src$$reg) );
2783     // IMUL   EDX,EAX
2784     emit_opcode( cbuf, 0x0F );
2785     emit_opcode( cbuf, 0xAF );
2786     emit_rm( cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg );
2787     // ADD    $tmp,EDX
2788     emit_opcode( cbuf, 0x03 );
2789     emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($dst$$reg) );
2790     // MUL   EDX:EAX,$src.lo
2791     emit_opcode( cbuf, 0xF7 );
2792     emit_rm( cbuf, 0x3, 0x4, $src$$reg );
2793     // ADD    EDX,$tmp
2794     emit_opcode( cbuf, 0x03 );
2795     emit_rm( cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $tmp$$reg );
2796   %}
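  // A minimal C sketch of that decomposition (x_* / y_* are the 32-bit halves of
  // dst and src; names used only for illustration):
  //   uint64_t p  = (uint64_t)x_lo * y_lo;
  //   uint32_t lo = (uint32_t)p;
  //   uint32_t hi = (uint32_t)(p >> 32) + x_hi*y_lo + x_lo*y_hi;  // cross terms wrap mod 2^32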
2797 
2798   enc_class long_multiply_con( eADXRegL dst, immL_127 src, rRegI tmp ) %{
2799     // Basic idea: lo(result) = lo(src * y_lo)
2800     //             hi(result) = hi(src * y_lo) + lo(src * y_hi)
2801     // IMUL   $tmp,EDX,$src
2802     emit_opcode( cbuf, 0x6B );
2803     emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($dst$$reg) );
2804     emit_d8( cbuf, (int)$src$$constant );
2805     // MOV    EDX,$src
2806     emit_opcode(cbuf, 0xB8 + EDX_enc);
2807     emit_d32( cbuf, (int)$src$$constant );
2808     // MUL   EDX:EAX,EDX
2809     emit_opcode( cbuf, 0xF7 );
2810     emit_rm( cbuf, 0x3, 0x4, EDX_enc );
2811     // ADD    EDX,$tmp
2812     emit_opcode( cbuf, 0x03 );
2813     emit_rm( cbuf, 0x3, EDX_enc, $tmp$$reg );
2814   %}
2815 
2816   enc_class long_div( eRegL src1, eRegL src2 ) %{
2817     // PUSH src1.hi
2818     emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src1$$reg) );
2819     // PUSH src1.lo
2820     emit_opcode(cbuf,               0x50+$src1$$reg  );
2821     // PUSH src2.hi
2822     emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src2$$reg) );
2823     // PUSH src2.lo
2824     emit_opcode(cbuf,               0x50+$src2$$reg  );
2825     // CALL directly to the runtime
2826     cbuf.set_insts_mark();
2827     emit_opcode(cbuf,0xE8);       // Call into runtime
2828     emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
2829     // Restore stack
2830     emit_opcode(cbuf, 0x83); // add  SP, #framesize
2831     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
2832     emit_d8(cbuf, 4*4);
2833   %}
2834 
2835   enc_class long_mod( eRegL src1, eRegL src2 ) %{
2836     // PUSH src1.hi
2837     emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src1$$reg) );
2838     // PUSH src1.lo
2839     emit_opcode(cbuf,               0x50+$src1$$reg  );
2840     // PUSH src2.hi
2841     emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src2$$reg) );
2842     // PUSH src2.lo
2843     emit_opcode(cbuf,               0x50+$src2$$reg  );
2844     // CALL directly to the runtime
2845     cbuf.set_insts_mark();
2846     emit_opcode(cbuf,0xE8);       // Call into runtime
2847     emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
2848     // Restore stack
2849     emit_opcode(cbuf, 0x83); // add  SP, #framesize
2850     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
2851     emit_d8(cbuf, 4*4);
2852   %}
2853 
2854   enc_class long_cmp_flags0( eRegL src, rRegI tmp ) %{
2855     // MOV   $tmp,$src.lo
2856     emit_opcode(cbuf, 0x8B);
2857     emit_rm(cbuf, 0x3, $tmp$$reg, $src$$reg);
2858     // OR    $tmp,$src.hi
2859     emit_opcode(cbuf, 0x0B);
2860     emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src$$reg));
2861   %}
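  // ORing the two halves into a scratch register sets ZF exactly when the whole
  // 64-bit value is zero, which is all a compare against 0L needs.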
2862 
2863   enc_class long_cmp_flags1( eRegL src1, eRegL src2 ) %{
2864     // CMP    $src1.lo,$src2.lo
2865     emit_opcode( cbuf, 0x3B );
2866     emit_rm(cbuf, 0x3, $src1$$reg, $src2$$reg );
2867     // JNE,s  skip
2868     emit_cc(cbuf, 0x70, 0x5);
2869     emit_d8(cbuf,2);
2870     // CMP    $src1.hi,$src2.hi
2871     emit_opcode( cbuf, 0x3B );
2872     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($src1$$reg), HIGH_FROM_LOW($src2$$reg) );
2873   %}
2874 
2875   enc_class long_cmp_flags2( eRegL src1, eRegL src2, rRegI tmp ) %{
2876     // CMP    $src1.lo,$src2.lo\t! Long compare; set flags for low bits
2877     emit_opcode( cbuf, 0x3B );
2878     emit_rm(cbuf, 0x3, $src1$$reg, $src2$$reg );
2879     // MOV    $tmp,$src1.hi
2880     emit_opcode( cbuf, 0x8B );
2881     emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src1$$reg) );
2882     // SBB   $tmp,$src2.hi\t! Compute flags for long compare
2883     emit_opcode( cbuf, 0x1B );
2884     emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src2$$reg) );
2885   %}
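  // This performs the full 64-bit subtraction src1 - src2 without keeping the
  // result: CMP produces the borrow from the low words and SBB folds it into the
  // high-word subtract, so the resulting sign/overflow flags are valid for signed
  // less-than / greater-or-equal tests of the whole long.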
2886 
2887   enc_class long_cmp_flags3( eRegL src, rRegI tmp ) %{
2888     // XOR    $tmp,$tmp
2889     emit_opcode(cbuf,0x33);  // XOR
2890     emit_rm(cbuf,0x3, $tmp$$reg, $tmp$$reg);
2891     // CMP    $tmp,$src.lo
2892     emit_opcode( cbuf, 0x3B );
2893     emit_rm(cbuf, 0x3, $tmp$$reg, $src$$reg );
2894     // SBB    $tmp,$src.hi
2895     emit_opcode( cbuf, 0x1B );
2896     emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src$$reg) );
2897   %}
2898 
2899  // Sniff, sniff... smells like Gnu Superoptimizer
2900   enc_class neg_long( eRegL dst ) %{
2901     emit_opcode(cbuf,0xF7);    // NEG hi
2902     emit_rm    (cbuf,0x3, 0x3, HIGH_FROM_LOW($dst$$reg));
2903     emit_opcode(cbuf,0xF7);    // NEG lo
2904     emit_rm    (cbuf,0x3, 0x3,               $dst$$reg );
2905     emit_opcode(cbuf,0x83);    // SBB hi,0
2906     emit_rm    (cbuf,0x3, 0x3, HIGH_FROM_LOW($dst$$reg));
2907     emit_d8    (cbuf,0 );
2908   %}
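  // Equivalent two-word negate in C (a sketch, not the emitted order):
  //   lo = -lo;               // NEG also leaves CF set when the word was non-zero
  //   hi = -hi - (lo != 0);   // the SBB hi,0 folds that borrow into the high word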
2909 
2910   enc_class enc_pop_rdx() %{
2911     emit_opcode(cbuf,0x5A);
2912   %}
2913 
2914   enc_class enc_rethrow() %{
2915     cbuf.set_insts_mark();
2916     emit_opcode(cbuf, 0xE9);        // jmp    entry
2917     emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.insts_end())-4,
2918                    runtime_call_Relocation::spec(), RELOC_IMM32 );
2919   %}
2920 
2921 
2922   // Convert a double to an int.  Java semantics require we do complex
2923   // manglelations in the corner cases.  So we set the rounding mode to
2924   // 'zero', store the darned double down as an int, and reset the
2925   // rounding mode to 'nearest'.  The hardware throws an exception which
2926   // patches up the correct value directly to the stack.
2927   enc_class DPR2I_encoding( regDPR src ) %{
2928     // Flip to round-to-zero mode.  We attempted to allow invalid-op
2929     // exceptions here, so that a NAN or other corner-case value will
2930     // throw an exception (but normal values get converted at full speed).
2931     // However, I2C adapters and other float-stack manglers leave pending
2932     // invalid-op exceptions hanging.  We would have to clear them before
2933     // enabling them and that is more expensive than just testing for the
2934     // invalid value Intel stores down in the corner cases.
2935     emit_opcode(cbuf,0xD9);            // FLDCW  trunc
2936     emit_opcode(cbuf,0x2D);
2937     emit_d32(cbuf,(int)StubRoutines::addr_fpu_cntrl_wrd_trunc());
2938     // Allocate a word
2939     emit_opcode(cbuf,0x83);            // SUB ESP,4
2940     emit_opcode(cbuf,0xEC);
2941     emit_d8(cbuf,0x04);
2942     // Encoding assumes a double has been pushed into FPR0.
2943     // Store down the double as an int, popping the FPU stack
2944     emit_opcode(cbuf,0xDB);            // FISTP [ESP]
2945     emit_opcode(cbuf,0x1C);
2946     emit_d8(cbuf,0x24);
2947     // Restore the rounding mode; mask the exception
2948     emit_opcode(cbuf,0xD9);            // FLDCW   std/24-bit mode
2949     emit_opcode(cbuf,0x2D);
2950     emit_d32( cbuf, Compile::current()->in_24_bit_fp_mode()
2951         ? (int)StubRoutines::addr_fpu_cntrl_wrd_24()
2952         : (int)StubRoutines::addr_fpu_cntrl_wrd_std());
2953 
2954     // Load the converted int; adjust CPU stack
2955     emit_opcode(cbuf,0x58);       // POP EAX
2956     emit_opcode(cbuf,0x3D);       // CMP EAX,imm
2957     emit_d32   (cbuf,0x80000000); //         0x80000000
2958     emit_opcode(cbuf,0x75);       // JNE around_slow_call
2959     emit_d8    (cbuf,0x07);       // Size of slow_call
2960     // Push src onto stack slow-path
2961     emit_opcode(cbuf,0xD9 );      // FLD     ST(i)
2962     emit_d8    (cbuf,0xC0-1+$src$$reg );
2963     // CALL directly to the runtime
2964     cbuf.set_insts_mark();
2965     emit_opcode(cbuf,0xE8);       // Call into runtime
2966     emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
2967     // Carry on here...
2968   %}
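  // Why the 0x80000000 test above: with the invalid-op exception masked, FISTP
  // stores the "integer indefinite" value 0x80000000 for NaNs and out-of-range
  // doubles.  Only then do we re-push the source and call the d2i wrapper, which
  // applies the Java rules (NaN -> 0, clamp to Integer.MIN_VALUE/MAX_VALUE);
  // everything else takes the fast path.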
2969 
2970   enc_class DPR2L_encoding( regDPR src ) %{
2971     emit_opcode(cbuf,0xD9);            // FLDCW  trunc
2972     emit_opcode(cbuf,0x2D);
2973     emit_d32(cbuf,(int)StubRoutines::addr_fpu_cntrl_wrd_trunc());
2974     // Allocate a word
2975     emit_opcode(cbuf,0x83);            // SUB ESP,8
2976     emit_opcode(cbuf,0xEC);
2977     emit_d8(cbuf,0x08);
2978     // Encoding assumes a double has been pushed into FPR0.
2979     // Store down the double as a long, popping the FPU stack
2980     emit_opcode(cbuf,0xDF);            // FISTP [ESP]
2981     emit_opcode(cbuf,0x3C);
2982     emit_d8(cbuf,0x24);
2983     // Restore the rounding mode; mask the exception
2984     emit_opcode(cbuf,0xD9);            // FLDCW   std/24-bit mode
2985     emit_opcode(cbuf,0x2D);
2986     emit_d32( cbuf, Compile::current()->in_24_bit_fp_mode()
2987         ? (int)StubRoutines::addr_fpu_cntrl_wrd_24()
2988         : (int)StubRoutines::addr_fpu_cntrl_wrd_std());
2989 
2990     // Load the converted long; adjust CPU stack
2991     emit_opcode(cbuf,0x58);       // POP EAX
2992     emit_opcode(cbuf,0x5A);       // POP EDX
2993     emit_opcode(cbuf,0x81);       // CMP EDX,imm
2994     emit_d8    (cbuf,0xFA);       // rdx
2995     emit_d32   (cbuf,0x80000000); //         0x80000000
2996     emit_opcode(cbuf,0x75);       // JNE around_slow_call
2997     emit_d8    (cbuf,0x07+4);     // Size of slow_call
2998     emit_opcode(cbuf,0x85);       // TEST EAX,EAX
2999     emit_opcode(cbuf,0xC0);       // 2/rax,/rax,
3000     emit_opcode(cbuf,0x75);       // JNE around_slow_call
3001     emit_d8    (cbuf,0x07);       // Size of slow_call
3002     // Push src onto stack slow-path
3003     emit_opcode(cbuf,0xD9 );      // FLD     ST(i)
3004     emit_d8    (cbuf,0xC0-1+$src$$reg );
3005     // CALL directly to the runtime
3006     cbuf.set_insts_mark();
3007     emit_opcode(cbuf,0xE8);       // Call into runtime
3008     emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
3009     // Carry on here...
3010   %}
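  // Same idea as the int conversion above: a NaN or out-of-range double comes
  // back from FISTP as the 64-bit indefinite value 0x8000000000000000, so the
  // slow path is taken only when EDX == 0x80000000 and EAX == 0; the d2l wrapper
  // then applies the Java rules (NaN -> 0, clamp to Long.MIN_VALUE/MAX_VALUE).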
3011 
3012   enc_class FMul_ST_reg( eRegFPR src1 ) %{
3013     // Operand was loaded from memory into fp ST (stack top)
3014     // FMUL   ST,$src  /* D8 C8+i */
3015     emit_opcode(cbuf, 0xD8);
3016     emit_opcode(cbuf, 0xC8 + $src1$$reg);
3017   %}
3018 
3019   enc_class FAdd_ST_reg( eRegFPR src2 ) %{
3020     // FADDP  ST,src2  /* D8 C0+i */
3021     emit_opcode(cbuf, 0xD8);
3022     emit_opcode(cbuf, 0xC0 + $src2$$reg);
3023     //could use FADDP  src2,fpST  /* DE C0+i */
3024   %}
3025 
3026   enc_class FAddP_reg_ST( eRegFPR src2 ) %{
3027     // FADDP  src2,ST  /* DE C0+i */
3028     emit_opcode(cbuf, 0xDE);
3029     emit_opcode(cbuf, 0xC0 + $src2$$reg);
3030   %}
3031 
3032   enc_class subFPR_divFPR_encode( eRegFPR src1, eRegFPR src2) %{
3033     // Operand has been loaded into fp ST (stack top)
3034       // FSUB   ST,$src1
3035       emit_opcode(cbuf, 0xD8);
3036       emit_opcode(cbuf, 0xE0 + $src1$$reg);
3037 
3038       // FDIV
3039       emit_opcode(cbuf, 0xD8);
3040       emit_opcode(cbuf, 0xF0 + $src2$$reg);
3041   %}
3042 
3043   enc_class MulFAddF (eRegFPR src1, eRegFPR src2) %{
3044     // Operand was loaded from memory into fp ST (stack top)
3045     // FADD   ST,$src  /* D8 C0+i */
3046     emit_opcode(cbuf, 0xD8);
3047     emit_opcode(cbuf, 0xC0 + $src1$$reg);
3048 
3049     // FMUL  ST,src2  /* D8 C*+i */
3050     emit_opcode(cbuf, 0xD8);
3051     emit_opcode(cbuf, 0xC8 + $src2$$reg);
3052   %}
3053 
3054 
3055   enc_class MulFAddFreverse (eRegFPR src1, eRegFPR src2) %{
3056     // Operand was loaded from memory into fp ST (stack top)
3057     // FADD   ST,$src  /* D8 C0+i */
3058     emit_opcode(cbuf, 0xD8);
3059     emit_opcode(cbuf, 0xC0 + $src1$$reg);
3060 
3061     // FMULP  src2,ST  /* DE C8+i */
3062     emit_opcode(cbuf, 0xDE);
3063     emit_opcode(cbuf, 0xC8 + $src2$$reg);
3064   %}
3065 
3066   // Atomically load the volatile long
3067   enc_class enc_loadL_volatile( memory mem, stackSlotL dst ) %{
3068     emit_opcode(cbuf,0xDF);
3069     int rm_byte_opcode = 0x05;
3070     int base     = $mem$$base;
3071     int index    = $mem$$index;
3072     int scale    = $mem$$scale;
3073     int displace = $mem$$disp;
3074     relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
3075     encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
3076     store_to_stackslot( cbuf, 0x0DF, 0x07, $dst$$disp );
3077   %}
3078 
3079   // Volatile Store Long.  Must be atomic, so move it into
3080   // the FP TOS and then do a 64-bit FIST.  Has to probe the
3081   // target address before the store (for null-ptr checks)
3082   // so the memory operand is used twice in the encoding.
3083   enc_class enc_storeL_volatile( memory mem, stackSlotL src ) %{
3084     store_to_stackslot( cbuf, 0x0DF, 0x05, $src$$disp );
3085     cbuf.set_insts_mark();            // Mark start of FIST in case $mem has an oop
3086     emit_opcode(cbuf,0xDF);
3087     int rm_byte_opcode = 0x07;
3088     int base     = $mem$$base;
3089     int index    = $mem$$index;
3090     int scale    = $mem$$scale;
3091     int displace = $mem$$disp;
3092     relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
3093     encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
3094   %}
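  // Note: going through the x87 stack is what makes these 64-bit accesses atomic
  // on 32-bit x86 -- FILD/FISTP move all 8 bytes in a single memory access
  // (for naturally aligned locations), which a pair of 32-bit moves would not.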
3095 
3096   // Safepoint Poll.  This polls the safepoint page, and causes an
3097   // exception if it is not readable. Unfortunately, it kills the condition code
3098   // in the process
3099 // We currently use TESTL [spp],EDI
3100   // A better choice might be TESTB [spp + pagesize() - CacheLineSize()],0
3101 
3102   enc_class Safepoint_Poll() %{
3103     cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0);
3104     emit_opcode(cbuf,0x85);
3105     emit_rm (cbuf, 0x0, 0x7, 0x5);
3106     emit_d32(cbuf, (intptr_t)os::get_polling_page());
3107   %}
3108 %}
3109 
3110 
3111 //----------FRAME--------------------------------------------------------------
3112 // Definition of frame structure and management information.
3113 //
3114 //  S T A C K   L A Y O U T    Allocators stack-slot number
3115 //                             |   (to get allocators register number
3116 //  G  Owned by    |        |  v    add OptoReg::stack0())
3117 //  r   CALLER     |        |
3118 //  o     |        +--------+      pad to even-align allocators stack-slot
3119 //  w     V        |  pad0  |        numbers; owned by CALLER
3120 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3121 //  h     ^        |   in   |  5
3122 //        |        |  args  |  4   Holes in incoming args owned by SELF
3123 //  |     |        |        |  3
3124 //  |     |        +--------+
3125 //  V     |        | old out|      Empty on Intel, window on Sparc
3126 //        |    old |preserve|      Must be even aligned.
3127 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3128 //        |        |   in   |  3   area for Intel ret address
3129 //     Owned by    |preserve|      Empty on Sparc.
3130 //       SELF      +--------+
3131 //        |        |  pad2  |  2   pad to align old SP
3132 //        |        +--------+  1
3133 //        |        | locks  |  0
3134 //        |        +--------+----> OptoReg::stack0(), even aligned
3135 //        |        |  pad1  | 11   pad to align new SP
3136 //        |        +--------+
3137 //        |        |        | 10
3138 //        |        | spills |  9   spills
3139 //        V        |        |  8   (pad0 slot for callee)
3140 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3141 //        ^        |  out   |  7
3142 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3143 //     Owned by    +--------+
3144 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3145 //        |    new |preserve|      Must be even-aligned.
3146 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3147 //        |        |        |
3148 //
3149 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3150 //         known from SELF's arguments and the Java calling convention.
3151 //         Region 6-7 is determined per call site.
3152 // Note 2: If the calling convention leaves holes in the incoming argument
3153 //         area, those holes are owned by SELF.  Holes in the outgoing area
3154 //         are owned by the CALLEE.  Holes should not be necessary in the
3155 //         incoming area, as the Java calling convention is completely under
3156 //         the control of the AD file.  Doubles can be sorted and packed to
3157 //         avoid holes.  Holes in the outgoing arguments may be necessary for
3158 //         varargs C calling conventions.
3159 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3160 //         even aligned with pad0 as needed.
3161 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3162 //         region 6-11 is even aligned; it may be padded out more so that
3163 //         the region from SP to FP meets the minimum stack alignment.
3164 
3165 frame %{
3166   // What direction does stack grow in (assumed to be same for C & Java)
3167   stack_direction(TOWARDS_LOW);
3168 
3169   // These three registers define part of the calling convention
3170   // between compiled code and the interpreter.
3171   inline_cache_reg(EAX);                // Inline Cache Register
3172   interpreter_method_oop_reg(EBX);      // Method Oop Register when calling interpreter
3173 
3174   // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3175   cisc_spilling_operand_name(indOffset32);
3176 
3177   // Number of stack slots consumed by locking an object
3178   sync_stack_slots(1);
3179 
3180   // Compiled code's Frame Pointer
3181   frame_pointer(ESP);
3182   // Interpreter stores its frame pointer in a register which is
3183   // stored to the stack by I2CAdaptors.
3184   // I2CAdaptors convert from interpreted Java to compiled Java.
3185   interpreter_frame_pointer(EBP);
3186 
3187   // Stack alignment requirement
3188   // Alignment size in bytes (128-bit -> 16 bytes)
3189   stack_alignment(StackAlignmentInBytes);
3190 
3191   // Number of stack slots between incoming argument block and the start of
3192   // a new frame.  The PROLOG must add this many slots to the stack.  The
3193   // EPILOG must remove this many slots.  Intel needs one slot for
3194   // return address and one for rbp (must save rbp).
3195   in_preserve_stack_slots(2+VerifyStackAtCalls);
3196 
3197   // Number of outgoing stack slots killed above the out_preserve_stack_slots
3198   // for calls to C.  Supports the var-args backing area for register parms.
3199   varargs_C_out_slots_killed(0);
3200 
3201   // The after-PROLOG location of the return address.  Location of
3202   // return address specifies a type (REG or STACK) and a number
3203   // representing the register number (i.e. - use a register name) or
3204   // stack slot.
3205   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3206   // Otherwise, it is above the locks and verification slot and alignment word
3207   return_addr(STACK - 1 +
3208               round_to((Compile::current()->in_preserve_stack_slots() +
3209                         Compile::current()->fixed_slots()),
3210                        stack_alignment_in_slots()));
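  // Worked example (assuming the defaults above): with 2 preserve slots, no
  // fixed slots and 16-byte (4-slot) stack alignment, round_to(2, 4) == 4, so
  // the return address is addressed as stack slot 3 (STACK - 1 + 4).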
3211 
3212   // Body of function which returns an integer array locating
3213   // arguments either in registers or in stack slots.  Passed an array
3214   // of ideal registers called "sig" and a "length" count.  Stack-slot
3215   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3216   // arguments for a CALLEE.  Incoming stack arguments are
3217   // automatically biased by the preserve_stack_slots field above.
3218   calling_convention %{
3219     // No difference between incoming/outgoing, just pass false
3220     SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3221   %}
3222 
3223 
3224   // Body of function which returns an integer array locating
3225   // arguments either in registers or in stack slots.  Passed an array
3226   // of ideal registers called "sig" and a "length" count.  Stack-slot
3227   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3228   // arguments for a CALLEE.  Incoming stack arguments are
3229   // automatically biased by the preserve_stack_slots field above.
3230   c_calling_convention %{
3231     // This is obviously always outgoing
3232     (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3233   %}
3234 
3235   // Location of C & interpreter return values
3236   c_return_value %{
3237     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3238     static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num,      EAX_num,      FPR1L_num,    FPR1L_num, EAX_num };
3239     static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
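    // Reading the tables: they are indexed by ideal register type, e.g. Op_RegI
    // and Op_RegP return in EAX, Op_RegL in EDX:EAX (hi/lo), and x87 float/double
    // results come back in FPR1 unless the SSE cases below redirect them to XMM0.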
3240 
3241     // in SSE2+ mode we want to keep the FPU stack clean so pretend
3242     // that C functions return float and double results in XMM0.
3243     if( ideal_reg == Op_RegD && UseSSE>=2 )
3244       return OptoRegPair(XMM0b_num,XMM0_num);
3245     if( ideal_reg == Op_RegF && UseSSE>=2 )
3246       return OptoRegPair(OptoReg::Bad,XMM0_num);
3247 
3248     return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3249   %}
3250 
3251   // Location of return values
3252   return_value %{
3253     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3254     static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num,      EAX_num,      FPR1L_num,    FPR1L_num, EAX_num };
3255     static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
3256     if( ideal_reg == Op_RegD && UseSSE>=2 )
3257       return OptoRegPair(XMM0b_num,XMM0_num);
3258     if( ideal_reg == Op_RegF && UseSSE>=1 )
3259       return OptoRegPair(OptoReg::Bad,XMM0_num);
3260     return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3261   %}
3262 
3263 %}
3264 
3265 //----------ATTRIBUTES---------------------------------------------------------
3266 //----------Operand Attributes-------------------------------------------------
3267 op_attrib op_cost(0);        // Required cost attribute
3268 
3269 //----------Instruction Attributes---------------------------------------------
3270 ins_attrib ins_cost(100);       // Required cost attribute
3271 ins_attrib ins_size(8);         // Required size attribute (in bits)
3272 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3273                                 // non-matching short branch variant of some
3274                                 // long branch?
3275 ins_attrib ins_alignment(1);    // Required alignment attribute (must be a power of 2)
3276                                 // specifies the alignment that some part of the instruction (not
3277                                 // necessarily the start) requires.  If > 1, a compute_padding()
3278                                 // function must be provided for the instruction
3279 
3280 //----------OPERANDS-----------------------------------------------------------
3281 // Operand definitions must precede instruction definitions for correct parsing
3282 // in the ADLC because operands constitute user defined types which are used in
3283 // instruction definitions.
3284 
3285 //----------Simple Operands----------------------------------------------------
3286 // Immediate Operands
3287 // Integer Immediate
3288 operand immI() %{
3289   match(ConI);
3290 
3291   op_cost(10);
3292   format %{ %}
3293   interface(CONST_INTER);
3294 %}
3295 
3296 // Constant for test vs zero
3297 operand immI0() %{
3298   predicate(n->get_int() == 0);
3299   match(ConI);
3300 
3301   op_cost(0);
3302   format %{ %}
3303   interface(CONST_INTER);
3304 %}
3305 
3306 // Constant for increment
3307 operand immI1() %{
3308   predicate(n->get_int() == 1);
3309   match(ConI);
3310 
3311   op_cost(0);
3312   format %{ %}
3313   interface(CONST_INTER);
3314 %}
3315 
3316 // Constant for decrement
3317 operand immI_M1() %{
3318   predicate(n->get_int() == -1);
3319   match(ConI);
3320 
3321   op_cost(0);
3322   format %{ %}
3323   interface(CONST_INTER);
3324 %}
3325 
3326 // Valid scale values for addressing modes
3327 operand immI2() %{
3328   predicate(0 <= n->get_int() && (n->get_int() <= 3));
3329   match(ConI);
3330 
3331   format %{ %}
3332   interface(CONST_INTER);
3333 %}
3334 
3335 operand immI8() %{
3336   predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3337   match(ConI);
3338 
3339   op_cost(5);
3340   format %{ %}
3341   interface(CONST_INTER);
3342 %}
3343 
3344 operand immI16() %{
3345   predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3346   match(ConI);
3347 
3348   op_cost(10);
3349   format %{ %}
3350   interface(CONST_INTER);
3351 %}
3352 
3353 // Int Immediate non-negative
3354 operand immU31()
3355 %{
3356   predicate(n->get_int() >= 0);
3357   match(ConI);
3358 
3359   op_cost(0);
3360   format %{ %}
3361   interface(CONST_INTER);
3362 %}
3363 
3364 // Constant for long shifts
3365 operand immI_32() %{
3366   predicate( n->get_int() == 32 );
3367   match(ConI);
3368 
3369   op_cost(0);
3370   format %{ %}
3371   interface(CONST_INTER);
3372 %}
3373 
3374 operand immI_1_31() %{
3375   predicate( n->get_int() >= 1 && n->get_int() <= 31 );
3376   match(ConI);
3377 
3378   op_cost(0);
3379   format %{ %}
3380   interface(CONST_INTER);
3381 %}
3382 
3383 operand immI_32_63() %{
3384   predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3385   match(ConI);
3386   op_cost(0);
3387 
3388   format %{ %}
3389   interface(CONST_INTER);
3390 %}
3391 
3392 operand immI_1() %{
3393   predicate( n->get_int() == 1 );
3394   match(ConI);
3395 
3396   op_cost(0);
3397   format %{ %}
3398   interface(CONST_INTER);
3399 %}
3400 
3401 operand immI_2() %{
3402   predicate( n->get_int() == 2 );
3403   match(ConI);
3404 
3405   op_cost(0);
3406   format %{ %}
3407   interface(CONST_INTER);
3408 %}
3409 
3410 operand immI_3() %{
3411   predicate( n->get_int() == 3 );
3412   match(ConI);
3413 
3414   op_cost(0);
3415   format %{ %}
3416   interface(CONST_INTER);
3417 %}
3418 
3419 // Pointer Immediate
3420 operand immP() %{
3421   match(ConP);
3422 
3423   op_cost(10);
3424   format %{ %}
3425   interface(CONST_INTER);
3426 %}
3427 
3428 // NULL Pointer Immediate
3429 operand immP0() %{
3430   predicate( n->get_ptr() == 0 );
3431   match(ConP);
3432   op_cost(0);
3433 
3434   format %{ %}
3435   interface(CONST_INTER);
3436 %}
3437 
3438 // Long Immediate
3439 operand immL() %{
3440   match(ConL);
3441 
3442   op_cost(20);
3443   format %{ %}
3444   interface(CONST_INTER);
3445 %}
3446 
3447 // Long Immediate zero
3448 operand immL0() %{
3449   predicate( n->get_long() == 0L );
3450   match(ConL);
3451   op_cost(0);
3452 
3453   format %{ %}
3454   interface(CONST_INTER);
3455 %}
3456 
3457 // Long Immediate minus one
3458 operand immL_M1() %{
3459   predicate( n->get_long() == -1L );
3460   match(ConL);
3461   op_cost(0);
3462 
3463   format %{ %}
3464   interface(CONST_INTER);
3465 %}
3466 
3467 // Long immediate from 0 to 127.
3468 // Used for a shorter form of long mul by 10.
3469 operand immL_127() %{
3470   predicate((0 <= n->get_long()) && (n->get_long() <= 127));
3471   match(ConL);
3472   op_cost(0);
3473 
3474   format %{ %}
3475   interface(CONST_INTER);
3476 %}
3477 
3478 // Long Immediate: low 32-bit mask
3479 operand immL_32bits() %{
3480   predicate(n->get_long() == 0xFFFFFFFFL);
3481   match(ConL);
3482   op_cost(0);
3483 
3484   format %{ %}
3485   interface(CONST_INTER);
3486 %}
3487 
3488 // Long Immediate: fits in a signed 32-bit value
3489 operand immL32() %{
3490   predicate(n->get_long() == (int)(n->get_long()));
3491   match(ConL);
3492   op_cost(20);
3493 
3494   format %{ %}
3495   interface(CONST_INTER);
3496 %}
3497 
3498 // Double Immediate zero
3499 operand immDPR0() %{
3500   // Do additional (and counter-intuitive) test against NaN to work around VC++
3501   // bug that generates code such that NaNs compare equal to 0.0
3502   predicate( UseSSE<=1 && n->getd() == 0.0 && !g_isnan(n->getd()) );
3503   match(ConD);
3504 
3505   op_cost(5);
3506   format %{ %}
3507   interface(CONST_INTER);
3508 %}
3509 
3510 // Double Immediate one
3511 operand immDPR1() %{
3512   predicate( UseSSE<=1 && n->getd() == 1.0 );
3513   match(ConD);
3514 
3515   op_cost(5);
3516   format %{ %}
3517   interface(CONST_INTER);
3518 %}
3519 
3520 // Double Immediate
3521 operand immDPR() %{
3522   predicate(UseSSE<=1);
3523   match(ConD);
3524 
3525   op_cost(5);
3526   format %{ %}
3527   interface(CONST_INTER);
3528 %}
3529 
3530 operand immD() %{
3531   predicate(UseSSE>=2);
3532   match(ConD);
3533 
3534   op_cost(5);
3535   format %{ %}
3536   interface(CONST_INTER);
3537 %}
3538 
3539 // Double Immediate zero
3540 operand immD0() %{
3541   // Do additional (and counter-intuitive) test against NaN to work around VC++
3542   // bug that generates code such that NaNs compare equal to 0.0 AND do not
3543   // compare equal to -0.0.
3544   predicate( UseSSE>=2 && jlong_cast(n->getd()) == 0 );
3545   match(ConD);
3546 
3547   format %{ %}
3548   interface(CONST_INTER);
3549 %}
3550 
3551 // Float Immediate zero
3552 operand immFPR0() %{
3553   predicate(UseSSE == 0 && n->getf() == 0.0F);
3554   match(ConF);
3555 
3556   op_cost(5);
3557   format %{ %}
3558   interface(CONST_INTER);
3559 %}
3560 
3561 // Float Immediate one
3562 operand immFPR1() %{
3563   predicate(UseSSE == 0 && n->getf() == 1.0F);
3564   match(ConF);
3565 
3566   op_cost(5);
3567   format %{ %}
3568   interface(CONST_INTER);
3569 %}
3570 
3571 // Float Immediate
3572 operand immFPR() %{
3573   predicate( UseSSE == 0 );
3574   match(ConF);
3575 
3576   op_cost(5);
3577   format %{ %}
3578   interface(CONST_INTER);
3579 %}
3580 
3581 // Float Immediate
3582 operand immF() %{
3583   predicate(UseSSE >= 1);
3584   match(ConF);
3585 
3586   op_cost(5);
3587   format %{ %}
3588   interface(CONST_INTER);
3589 %}
3590 
3591 // Float Immediate zero.  Zero and not -0.0
3592 operand immF0() %{
3593   predicate( UseSSE >= 1 && jint_cast(n->getf()) == 0 );
3594   match(ConF);
3595 
3596   op_cost(5);
3597   format %{ %}
3598   interface(CONST_INTER);
3599 %}
3600 
3601 // Immediates for special shifts (sign extend)
3602 
3603 // Constants for sign-extension shift amounts
3604 operand immI_16() %{
3605   predicate( n->get_int() == 16 );
3606   match(ConI);
3607 
3608   format %{ %}
3609   interface(CONST_INTER);
3610 %}
3611 
3612 operand immI_24() %{
3613   predicate( n->get_int() == 24 );
3614   match(ConI);
3615 
3616   format %{ %}
3617   interface(CONST_INTER);
3618 %}
3619 
3620 // Constant for byte-wide masking
3621 operand immI_255() %{
3622   predicate( n->get_int() == 255 );
3623   match(ConI);
3624 
3625   format %{ %}
3626   interface(CONST_INTER);
3627 %}
3628 
3629 // Constant for short-wide masking
3630 operand immI_65535() %{
3631   predicate(n->get_int() == 65535);
3632   match(ConI);
3633 
3634   format %{ %}
3635   interface(CONST_INTER);
3636 %}
3637 
3638 // Register Operands
3639 // Integer Register
3640 operand rRegI() %{
3641   constraint(ALLOC_IN_RC(int_reg));
3642   match(RegI);
3643   match(xRegI);
3644   match(eAXRegI);
3645   match(eBXRegI);
3646   match(eCXRegI);
3647   match(eDXRegI);
3648   match(eDIRegI);
3649   match(eSIRegI);
3650 
3651   format %{ %}
3652   interface(REG_INTER);
3653 %}
3654 
3655 // Subset of Integer Register
3656 operand xRegI(rRegI reg) %{
3657   constraint(ALLOC_IN_RC(int_x_reg));
3658   match(reg);
3659   match(eAXRegI);
3660   match(eBXRegI);
3661   match(eCXRegI);
3662   match(eDXRegI);
3663 
3664   format %{ %}
3665   interface(REG_INTER);
3666 %}
3667 
3668 // Special Registers
3669 operand eAXRegI(xRegI reg) %{
3670   constraint(ALLOC_IN_RC(eax_reg));
3671   match(reg);
3672   match(rRegI);
3673 
3674   format %{ "EAX" %}
3675   interface(REG_INTER);
3676 %}
3677 
3678 // Special Registers
3679 operand eBXRegI(xRegI reg) %{
3680   constraint(ALLOC_IN_RC(ebx_reg));
3681   match(reg);
3682   match(rRegI);
3683 
3684   format %{ "EBX" %}
3685   interface(REG_INTER);
3686 %}
3687 
3688 operand eCXRegI(xRegI reg) %{
3689   constraint(ALLOC_IN_RC(ecx_reg));
3690   match(reg);
3691   match(rRegI);
3692 
3693   format %{ "ECX" %}
3694   interface(REG_INTER);
3695 %}
3696 
3697 operand eDXRegI(xRegI reg) %{
3698   constraint(ALLOC_IN_RC(edx_reg));
3699   match(reg);
3700   match(rRegI);
3701 
3702   format %{ "EDX" %}
3703   interface(REG_INTER);
3704 %}
3705 
3706 operand eDIRegI(xRegI reg) %{
3707   constraint(ALLOC_IN_RC(edi_reg));
3708   match(reg);
3709   match(rRegI);
3710 
3711   format %{ "EDI" %}
3712   interface(REG_INTER);
3713 %}
3714 
3715 operand naxRegI() %{
3716   constraint(ALLOC_IN_RC(nax_reg));
3717   match(RegI);
3718   match(eCXRegI);
3719   match(eDXRegI);
3720   match(eSIRegI);
3721   match(eDIRegI);
3722 
3723   format %{ %}
3724   interface(REG_INTER);
3725 %}
3726 
3727 operand nadxRegI() %{
3728   constraint(ALLOC_IN_RC(nadx_reg));
3729   match(RegI);
3730   match(eBXRegI);
3731   match(eCXRegI);
3732   match(eSIRegI);
3733   match(eDIRegI);
3734 
3735   format %{ %}
3736   interface(REG_INTER);
3737 %}
3738 
3739 operand ncxRegI() %{
3740   constraint(ALLOC_IN_RC(ncx_reg));
3741   match(RegI);
3742   match(eAXRegI);
3743   match(eDXRegI);
3744   match(eSIRegI);
3745   match(eDIRegI);
3746 
3747   format %{ %}
3748   interface(REG_INTER);
3749 %}
3750 
3751 // // This operand was used by cmpFastUnlock, but conflicted with 'object' reg
3752 // //
3753 operand eSIRegI(xRegI reg) %{
3754    constraint(ALLOC_IN_RC(esi_reg));
3755    match(reg);
3756    match(rRegI);
3757 
3758    format %{ "ESI" %}
3759    interface(REG_INTER);
3760 %}
3761 
3762 // Pointer Register
3763 operand anyRegP() %{
3764   constraint(ALLOC_IN_RC(any_reg));
3765   match(RegP);
3766   match(eAXRegP);
3767   match(eBXRegP);
3768   match(eCXRegP);
3769   match(eDIRegP);
3770   match(eRegP);
3771 
3772   format %{ %}
3773   interface(REG_INTER);
3774 %}
3775 
3776 operand eRegP() %{
3777   constraint(ALLOC_IN_RC(int_reg));
3778   match(RegP);
3779   match(eAXRegP);
3780   match(eBXRegP);
3781   match(eCXRegP);
3782   match(eDIRegP);
3783 
3784   format %{ %}
3785   interface(REG_INTER);
3786 %}
3787 
3788 // On Windows 95, EBP is not safe to use for implicit null tests.
3789 operand eRegP_no_EBP() %{
3790   constraint(ALLOC_IN_RC(int_reg_no_rbp));
3791   match(RegP);
3792   match(eAXRegP);
3793   match(eBXRegP);
3794   match(eCXRegP);
3795   match(eDIRegP);
3796 
3797   op_cost(100);
3798   format %{ %}
3799   interface(REG_INTER);
3800 %}
3801 
3802 operand naxRegP() %{
3803   constraint(ALLOC_IN_RC(nax_reg));
3804   match(RegP);
3805   match(eBXRegP);
3806   match(eDXRegP);
3807   match(eCXRegP);
3808   match(eSIRegP);
3809   match(eDIRegP);
3810 
3811   format %{ %}
3812   interface(REG_INTER);
3813 %}
3814 
3815 operand nabxRegP() %{
3816   constraint(ALLOC_IN_RC(nabx_reg));
3817   match(RegP);
3818   match(eCXRegP);
3819   match(eDXRegP);
3820   match(eSIRegP);
3821   match(eDIRegP);
3822 
3823   format %{ %}
3824   interface(REG_INTER);
3825 %}
3826 
3827 operand pRegP() %{
3828   constraint(ALLOC_IN_RC(p_reg));
3829   match(RegP);
3830   match(eBXRegP);
3831   match(eDXRegP);
3832   match(eSIRegP);
3833   match(eDIRegP);
3834 
3835   format %{ %}
3836   interface(REG_INTER);
3837 %}
3838 
3839 // Special Registers
3840 // Return a pointer value
3841 operand eAXRegP(eRegP reg) %{
3842   constraint(ALLOC_IN_RC(eax_reg));
3843   match(reg);
3844   format %{ "EAX" %}
3845   interface(REG_INTER);
3846 %}
3847 
3848 // Used in AtomicAdd
3849 operand eBXRegP(eRegP reg) %{
3850   constraint(ALLOC_IN_RC(ebx_reg));
3851   match(reg);
3852   format %{ "EBX" %}
3853   interface(REG_INTER);
3854 %}
3855 
3856 // Tail-call (interprocedural jump) to interpreter
3857 operand eCXRegP(eRegP reg) %{
3858   constraint(ALLOC_IN_RC(ecx_reg));
3859   match(reg);
3860   format %{ "ECX" %}
3861   interface(REG_INTER);
3862 %}
3863 
3864 operand eSIRegP(eRegP reg) %{
3865   constraint(ALLOC_IN_RC(esi_reg));
3866   match(reg);
3867   format %{ "ESI" %}
3868   interface(REG_INTER);
3869 %}
3870 
3871 // Used in rep stosw
3872 operand eDIRegP(eRegP reg) %{
3873   constraint(ALLOC_IN_RC(edi_reg));
3874   match(reg);
3875   format %{ "EDI" %}
3876   interface(REG_INTER);
3877 %}
3878 
3879 operand eBPRegP() %{
3880   constraint(ALLOC_IN_RC(ebp_reg));
3881   match(RegP);
3882   format %{ "EBP" %}
3883   interface(REG_INTER);
3884 %}
3885 
3886 operand eRegL() %{
3887   constraint(ALLOC_IN_RC(long_reg));
3888   match(RegL);
3889   match(eADXRegL);
3890 
3891   format %{ %}
3892   interface(REG_INTER);
3893 %}
3894 
3895 operand eADXRegL( eRegL reg ) %{
3896   constraint(ALLOC_IN_RC(eadx_reg));
3897   match(reg);
3898 
3899   format %{ "EDX:EAX" %}
3900   interface(REG_INTER);
3901 %}
3902 
3903 operand eBCXRegL( eRegL reg ) %{
3904   constraint(ALLOC_IN_RC(ebcx_reg));
3905   match(reg);
3906 
3907   format %{ "EBX:ECX" %}
3908   interface(REG_INTER);
3909 %}
3910 
3911 // Special case for integer high multiply
3912 operand eADXRegL_low_only() %{
3913   constraint(ALLOC_IN_RC(eadx_reg));
3914   match(RegL);
3915 
3916   format %{ "EAX" %}
3917   interface(REG_INTER);
3918 %}
3919 
3920 // Flags register, used as output of compare instructions
3921 operand eFlagsReg() %{
3922   constraint(ALLOC_IN_RC(int_flags));
3923   match(RegFlags);
3924 
3925   format %{ "EFLAGS" %}
3926   interface(REG_INTER);
3927 %}
3928 
3929 // Flags register, used as output of FLOATING POINT compare instructions
3930 operand eFlagsRegU() %{
3931   constraint(ALLOC_IN_RC(int_flags));
3932   match(RegFlags);
3933 
3934   format %{ "EFLAGS_U" %}
3935   interface(REG_INTER);
3936 %}
3937 
3938 operand eFlagsRegUCF() %{
3939   constraint(ALLOC_IN_RC(int_flags));
3940   match(RegFlags);
3941   predicate(false);
3942 
3943   format %{ "EFLAGS_U_CF" %}
3944   interface(REG_INTER);
3945 %}
3946 
3947 // Condition Code Register used by long compare
3948 operand flagsReg_long_LTGE() %{
3949   constraint(ALLOC_IN_RC(int_flags));
3950   match(RegFlags);
3951   format %{ "FLAGS_LTGE" %}
3952   interface(REG_INTER);
3953 %}
3954 operand flagsReg_long_EQNE() %{
3955   constraint(ALLOC_IN_RC(int_flags));
3956   match(RegFlags);
3957   format %{ "FLAGS_EQNE" %}
3958   interface(REG_INTER);
3959 %}
3960 operand flagsReg_long_LEGT() %{
3961   constraint(ALLOC_IN_RC(int_flags));
3962   match(RegFlags);
3963   format %{ "FLAGS_LEGT" %}
3964   interface(REG_INTER);
3965 %}
3966 
3967 // Float register operands
3968 operand regDPR() %{
3969   predicate( UseSSE < 2 );
3970   constraint(ALLOC_IN_RC(fp_dbl_reg));
3971   match(RegD);
3972   match(regDPR1);
3973   match(regDPR2);
3974   format %{ %}
3975   interface(REG_INTER);
3976 %}
3977 
3978 operand regDPR1(regDPR reg) %{
3979   predicate( UseSSE < 2 );
3980   constraint(ALLOC_IN_RC(fp_dbl_reg0));
3981   match(reg);
3982   format %{ "FPR1" %}
3983   interface(REG_INTER);
3984 %}
3985 
3986 operand regDPR2(regDPR reg) %{
3987   predicate( UseSSE < 2 );
3988   constraint(ALLOC_IN_RC(fp_dbl_reg1));
3989   match(reg);
3990   format %{ "FPR2" %}
3991   interface(REG_INTER);
3992 %}
3993 
3994 operand regnotDPR1(regDPR reg) %{
3995   predicate( UseSSE < 2 );
3996   constraint(ALLOC_IN_RC(fp_dbl_notreg0));
3997   match(reg);
3998   format %{ %}
3999   interface(REG_INTER);
4000 %}
4001 
4002 // Float register operands
4003 operand regFPR() %{
4004   predicate( UseSSE < 2 );
4005   constraint(ALLOC_IN_RC(fp_flt_reg));
4006   match(RegF);
4007   match(regFPR1);
4008   format %{ %}
4009   interface(REG_INTER);
4010 %}
4011 
4012 // Float register operands
4013 operand regFPR1(regFPR reg) %{
4014   predicate( UseSSE < 2 );
4015   constraint(ALLOC_IN_RC(fp_flt_reg0));
4016   match(reg);
4017   format %{ "FPR1" %}
4018   interface(REG_INTER);
4019 %}
4020 
4021 // XMM Float register operands
4022 operand regF() %{
4023   predicate( UseSSE>=1 );
4024   constraint(ALLOC_IN_RC(float_reg));
4025   match(RegF);
4026   format %{ %}
4027   interface(REG_INTER);
4028 %}
4029 
4030 // XMM Double register operands
4031 operand regD() %{
4032   predicate( UseSSE>=2 );
4033   constraint(ALLOC_IN_RC(double_reg));
4034   match(RegD);
4035   format %{ %}
4036   interface(REG_INTER);
4037 %}
4038 
4039 
4040 //----------Memory Operands----------------------------------------------------
4041 // Direct Memory Operand
4042 operand direct(immP addr) %{
4043   match(addr);
4044 
4045   format %{ "[$addr]" %}
4046   interface(MEMORY_INTER) %{
4047     base(0xFFFFFFFF);
4048     index(0x4);
4049     scale(0x0);
4050     disp($addr);
4051   %}
4052 %}
4053 
4054 // Indirect Memory Operand
4055 operand indirect(eRegP reg) %{
4056   constraint(ALLOC_IN_RC(int_reg));
4057   match(reg);
4058 
4059   format %{ "[$reg]" %}
4060   interface(MEMORY_INTER) %{
4061     base($reg);
4062     index(0x4);
4063     scale(0x0);
4064     disp(0x0);
4065   %}
4066 %}
4067 
4068 // Indirect Memory Plus Short Offset Operand
4069 operand indOffset8(eRegP reg, immI8 off) %{
4070   match(AddP reg off);
4071 
4072   format %{ "[$reg + $off]" %}
4073   interface(MEMORY_INTER) %{
4074     base($reg);
4075     index(0x4);
4076     scale(0x0);
4077     disp($off);
4078   %}
4079 %}
4080 
4081 // Indirect Memory Plus Long Offset Operand
4082 operand indOffset32(eRegP reg, immI off) %{
4083   match(AddP reg off);
4084 
4085   format %{ "[$reg + $off]" %}
4086   interface(MEMORY_INTER) %{
4087     base($reg);
4088     index(0x4);
4089     scale(0x0);
4090     disp($off);
4091   %}
4092 %}
4093 
4094 // Indirect Memory Plus Long Offset Operand
4095 operand indOffset32X(rRegI reg, immP off) %{
4096   match(AddP off reg);
4097 
4098   format %{ "[$reg + $off]" %}
4099   interface(MEMORY_INTER) %{
4100     base($reg);
4101     index(0x4);
4102     scale(0x0);
4103     disp($off);
4104   %}
4105 %}
4106 
4107 // Indirect Memory Plus Index Register Plus Offset Operand
4108 operand indIndexOffset(eRegP reg, rRegI ireg, immI off) %{
4109   match(AddP (AddP reg ireg) off);
4110 
4111   op_cost(10);
4112   format %{"[$reg + $off + $ireg]" %}
4113   interface(MEMORY_INTER) %{
4114     base($reg);
4115     index($ireg);
4116     scale(0x0);
4117     disp($off);
4118   %}
4119 %}
4120 
4121 // Indirect Memory Plus Index Register Plus Offset Operand
4122 operand indIndex(eRegP reg, rRegI ireg) %{
4123   match(AddP reg ireg);
4124 
4125   op_cost(10);
4126   format %{"[$reg + $ireg]" %}
4127   interface(MEMORY_INTER) %{
4128     base($reg);
4129     index($ireg);
4130     scale(0x0);
4131     disp(0x0);
4132   %}
4133 %}
4134 
4135 // // -------------------------------------------------------------------------
4136 // // 486 architecture doesn't support "scale * index + offset" without a base
4137 // // -------------------------------------------------------------------------
4138 // // Scaled Memory Operands
4139 // // Indirect Memory Times Scale Plus Offset Operand
4140 // operand indScaleOffset(immP off, rRegI ireg, immI2 scale) %{
4141 //   match(AddP off (LShiftI ireg scale));
4142 //
4143 //   op_cost(10);
4144 //   format %{"[$off + $ireg << $scale]" %}
4145 //   interface(MEMORY_INTER) %{
4146 //     base(0x4);
4147 //     index($ireg);
4148 //     scale($scale);
4149 //     disp($off);
4150 //   %}
4151 // %}
4152 
4153 // Indirect Memory Times Scale Plus Index Register
4154 operand indIndexScale(eRegP reg, rRegI ireg, immI2 scale) %{
4155   match(AddP reg (LShiftI ireg scale));
4156 
4157   op_cost(10);
4158   format %{"[$reg + $ireg << $scale]" %}
4159   interface(MEMORY_INTER) %{
4160     base($reg);
4161     index($ireg);
4162     scale($scale);
4163     disp(0x0);
4164   %}
4165 %}
4166 
4167 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
4168 operand indIndexScaleOffset(eRegP reg, immI off, rRegI ireg, immI2 scale) %{
4169   match(AddP (AddP reg (LShiftI ireg scale)) off);
4170 
4171   op_cost(10);
4172   format %{"[$reg + $off + $ireg << $scale]" %}
4173   interface(MEMORY_INTER) %{
4174     base($reg);
4175     index($ireg);
4176     scale($scale);
4177     disp($off);
4178   %}
4179 %}
4180 
4181 //----------Load Long Memory Operands------------------------------------------
4182 // The load-long idiom will use its address expression again after loading
4183 // the first word of the long.  If the load-long destination overlaps with
4184 // registers used in the addressing expression, the 2nd half will be loaded
4185 // from a clobbered address.  Fix this by requiring that load-long use
4186 // address registers that do not overlap with the load-long target.
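// For example, if the long were loaded as "MOV EAX,[EAX]; MOV EDX,[EAX+4]",
// the second word would be read through the already-clobbered base register.
// Restricting the address to a dedicated register (ESI, via load_long_RegP
// below) avoids that overlap.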
4187 
4188 // load-long support
4189 operand load_long_RegP() %{
4190   constraint(ALLOC_IN_RC(esi_reg));
4191   match(RegP);
4192   match(eSIRegP);
4193   op_cost(100);
4194   format %{  %}
4195   interface(REG_INTER);
4196 %}
4197 
4198 // Indirect Memory Operand Long
4199 operand load_long_indirect(load_long_RegP reg) %{
4200   constraint(ALLOC_IN_RC(esi_reg));
4201   match(reg);
4202 
4203   format %{ "[$reg]" %}
4204   interface(MEMORY_INTER) %{
4205     base($reg);
4206     index(0x4);
4207     scale(0x0);
4208     disp(0x0);
4209   %}
4210 %}
4211 
4212 // Indirect Memory Plus Long Offset Operand
4213 operand load_long_indOffset32(load_long_RegP reg, immI off) %{
4214   match(AddP reg off);
4215 
4216   format %{ "[$reg + $off]" %}
4217   interface(MEMORY_INTER) %{
4218     base($reg);
4219     index(0x4);
4220     scale(0x0);
4221     disp($off);
4222   %}
4223 %}
4224 
4225 opclass load_long_memory(load_long_indirect, load_long_indOffset32);
4226 
4227 
4228 //----------Special Memory Operands--------------------------------------------
4229 // Stack Slot Operand - This operand is used for loading and storing temporary
4230 //                      values on the stack where a match requires a value to
4231 //                      flow through memory.
4232 operand stackSlotP(sRegP reg) %{
4233   constraint(ALLOC_IN_RC(stack_slots));
4234   // No match rule because this operand is only generated in matching
4235   format %{ "[$reg]" %}
4236   interface(MEMORY_INTER) %{
4237     base(0x4);   // ESP
4238     index(0x4);  // No Index
4239     scale(0x0);  // No Scale
4240     disp($reg);  // Stack Offset
4241   %}
4242 %}
4243 
4244 operand stackSlotI(sRegI reg) %{
4245   constraint(ALLOC_IN_RC(stack_slots));
4246   // No match rule because this operand is only generated in matching
4247   format %{ "[$reg]" %}
4248   interface(MEMORY_INTER) %{
4249     base(0x4);   // ESP
4250     index(0x4);  // No Index
4251     scale(0x0);  // No Scale
4252     disp($reg);  // Stack Offset
4253   %}
4254 %}
4255 
4256 operand stackSlotF(sRegF reg) %{
4257   constraint(ALLOC_IN_RC(stack_slots));
4258   // No match rule because this operand is only generated in matching
4259   format %{ "[$reg]" %}
4260   interface(MEMORY_INTER) %{
4261     base(0x4);   // ESP
4262     index(0x4);  // No Index
4263     scale(0x0);  // No Scale
4264     disp($reg);  // Stack Offset
4265   %}
4266 %}
4267 
4268 operand stackSlotD(sRegD reg) %{
4269   constraint(ALLOC_IN_RC(stack_slots));
4270   // No match rule because this operand is only generated in matching
4271   format %{ "[$reg]" %}
4272   interface(MEMORY_INTER) %{
4273     base(0x4);   // ESP
4274     index(0x4);  // No Index
4275     scale(0x0);  // No Scale
4276     disp($reg);  // Stack Offset
4277   %}
4278 %}
4279 
4280 operand stackSlotL(sRegL reg) %{
4281   constraint(ALLOC_IN_RC(stack_slots));
4282   // No match rule because this operand is only generated in matching
4283   format %{ "[$reg]" %}
4284   interface(MEMORY_INTER) %{
4285     base(0x4);   // ESP
4286     index(0x4);  // No Index
4287     scale(0x0);  // No Scale
4288     disp($reg);  // Stack Offset
4289   %}
4290 %}
4291 
4292 //----------Memory Operands - Win95 Implicit Null Variants----------------
4293 // Indirect Memory Operand
4294 operand indirect_win95_safe(eRegP_no_EBP reg)
4295 %{
4296   constraint(ALLOC_IN_RC(int_reg));
4297   match(reg);
4298 
4299   op_cost(100);
4300   format %{ "[$reg]" %}
4301   interface(MEMORY_INTER) %{
4302     base($reg);
4303     index(0x4);
4304     scale(0x0);
4305     disp(0x0);
4306   %}
4307 %}
4308 
4309 // Indirect Memory Plus Short Offset Operand
4310 operand indOffset8_win95_safe(eRegP_no_EBP reg, immI8 off)
4311 %{
4312   match(AddP reg off);
4313 
4314   op_cost(100);
4315   format %{ "[$reg + $off]" %}
4316   interface(MEMORY_INTER) %{
4317     base($reg);
4318     index(0x4);
4319     scale(0x0);
4320     disp($off);
4321   %}
4322 %}
4323 
4324 // Indirect Memory Plus Long Offset Operand
4325 operand indOffset32_win95_safe(eRegP_no_EBP reg, immI off)
4326 %{
4327   match(AddP reg off);
4328 
4329   op_cost(100);
4330   format %{ "[$reg + $off]" %}
4331   interface(MEMORY_INTER) %{
4332     base($reg);
4333     index(0x4);
4334     scale(0x0);
4335     disp($off);
4336   %}
4337 %}
4338 
4339 // Indirect Memory Plus Index Register Plus Offset Operand
4340 operand indIndexOffset_win95_safe(eRegP_no_EBP reg, rRegI ireg, immI off)
4341 %{
4342   match(AddP (AddP reg ireg) off);
4343 
4344   op_cost(100);
4345   format %{"[$reg + $off + $ireg]" %}
4346   interface(MEMORY_INTER) %{
4347     base($reg);
4348     index($ireg);
4349     scale(0x0);
4350     disp($off);
4351   %}
4352 %}
4353 
4354 // Indirect Memory Times Scale Plus Index Register
4355 operand indIndexScale_win95_safe(eRegP_no_EBP reg, rRegI ireg, immI2 scale)
4356 %{
4357   match(AddP reg (LShiftI ireg scale));
4358 
4359   op_cost(100);
4360   format %{"[$reg + $ireg << $scale]" %}
4361   interface(MEMORY_INTER) %{
4362     base($reg);
4363     index($ireg);
4364     scale($scale);
4365     disp(0x0);
4366   %}
4367 %}
4368 
4369 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
4370 operand indIndexScaleOffset_win95_safe(eRegP_no_EBP reg, immI off, rRegI ireg, immI2 scale)
4371 %{
4372   match(AddP (AddP reg (LShiftI ireg scale)) off);
4373 
4374   op_cost(100);
4375   format %{"[$reg + $off + $ireg << $scale]" %}
4376   interface(MEMORY_INTER) %{
4377     base($reg);
4378     index($ireg);
4379     scale($scale);
4380     disp($off);
4381   %}
4382 %}
4383 
4384 //----------Conditional Branch Operands----------------------------------------
4385 // Comparison Op  - This is the operation of the comparison, and is limited to
4386 //                  the following set of codes:
4387 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
4388 //
4389 // Other attributes of the comparison, such as unsignedness, are specified
4390 // by the comparison instruction that sets a condition code flags register.
4391 // That result is represented by a flags operand whose subtype is appropriate
4392 // to the unsignedness (etc.) of the comparison.
4393 //
4394 // Later, the instruction which matches both the Comparison Op (a Bool) and
4395 // the flags (produced by the Cmp) specifies the coding of the comparison op
4396 // by matching a specific subtype of Bool operand below, such as cmpOpU.
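// For example, for a signed "x < y" branch the CmpI produces an eFlagsReg, and
// its Bool (test "lt") matches cmpOp, whose "less" entry (0xC) is the condition
// nibble the encoder combines with the Jcc/SETcc/CMOVcc opcode bytes.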
4397 
4398 // Comparison Code
4399 operand cmpOp() %{
4400   match(Bool);
4401 
4402   format %{ "" %}
4403   interface(COND_INTER) %{
4404     equal(0x4, "e");
4405     not_equal(0x5, "ne");
4406     less(0xC, "l");
4407     greater_equal(0xD, "ge");
4408     less_equal(0xE, "le");
4409     greater(0xF, "g");
4410     overflow(0x0, "o");
4411     no_overflow(0x1, "no");
4412   %}
4413 %}
4414 
4415 // Comparison Code, unsigned compare.  Used by FP also, with
4416 // C2 (unordered) turned into GT or LT already.  The other bits
4417 // C0 and C3 are turned into Carry & Zero flags.
4418 operand cmpOpU() %{
4419   match(Bool);
4420 
4421   format %{ "" %}
4422   interface(COND_INTER) %{
4423     equal(0x4, "e");
4424     not_equal(0x5, "ne");
4425     less(0x2, "b");
4426     greater_equal(0x3, "nb");
4427     less_equal(0x6, "be");
4428     greater(0x7, "nbe");
4429     overflow(0x0, "o");
4430     no_overflow(0x1, "no");
4431   %}
4432 %}
4433 
4434 // Floating comparisons that don't require any fixup for the unordered case
4435 operand cmpOpUCF() %{
4436   match(Bool);
4437   predicate(n->as_Bool()->_test._test == BoolTest::lt ||
4438             n->as_Bool()->_test._test == BoolTest::ge ||
4439             n->as_Bool()->_test._test == BoolTest::le ||
4440             n->as_Bool()->_test._test == BoolTest::gt);
4441   format %{ "" %}
4442   interface(COND_INTER) %{
4443     equal(0x4, "e");
4444     not_equal(0x5, "ne");
4445     less(0x2, "b");
4446     greater_equal(0x3, "nb");
4447     less_equal(0x6, "be");
4448     greater(0x7, "nbe");
4449     overflow(0x0, "o");
4450     no_overflow(0x1, "no");
4451   %}
4452 %}
4453 
4454 
4455 // Floating comparisons that can be fixed up with extra conditional jumps
4456 operand cmpOpUCF2() %{
4457   match(Bool);
4458   predicate(n->as_Bool()->_test._test == BoolTest::ne ||
4459             n->as_Bool()->_test._test == BoolTest::eq);
4460   format %{ "" %}
4461   interface(COND_INTER) %{
4462     equal(0x4, "e");
4463     not_equal(0x5, "ne");
4464     less(0x2, "b");
4465     greater_equal(0x3, "nb");
4466     less_equal(0x6, "be");
4467     greater(0x7, "nbe");
4468     overflow(0x0, "o");
4469     no_overflow(0x1, "no");
4470   %}
4471 %}
4472 
4473 // Comparison Code for FP conditional move
4474 operand cmpOp_fcmov() %{
4475   match(Bool);
4476 
4477   predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4478             n->as_Bool()->_test._test != BoolTest::no_overflow);
4479   format %{ "" %}
4480   interface(COND_INTER) %{
4481     equal        (0x0C8);
4482     not_equal    (0x1C8);
4483     less         (0x0C0);
4484     greater_equal(0x1C0);
4485     less_equal   (0x0D0);
4486     greater      (0x1D0);
4487     overflow(0x0, "o"); // not really supported by the instruction
4488     no_overflow(0x1, "no"); // not really supported by the instruction
4489   %}
4490 %}
4491 
4492 // Comparison Code used in long compares
4493 operand cmpOp_commute() %{
4494   match(Bool);
4495 
4496   format %{ "" %}
4497   interface(COND_INTER) %{
4498     equal(0x4, "e");
4499     not_equal(0x5, "ne");
4500     less(0xF, "g");
4501     greater_equal(0xE, "le");
4502     less_equal(0xD, "ge");
4503     greater(0xC, "l");
4504     overflow(0x0, "o");
4505     no_overflow(0x1, "no");
4506   %}
4507 %}
4508 
4509 //----------OPERAND CLASSES----------------------------------------------------
4510 // Operand Classes are groups of operands that are used to simplify
4511 // instruction definitions by not requiring the AD writer to specify separate
4512 // instructions for every form of operand when the instruction accepts
4513 // multiple operand types with the same basic encoding and format.  The classic
4514 // case of this is memory operands.
4515 
4516 opclass memory(direct, indirect, indOffset8, indOffset32, indOffset32X, indIndexOffset,
4517                indIndex, indIndexScale, indIndexScaleOffset);
4518 
4519 // Long memory operations are encoded in 2 instructions and a +4 offset.
4520 // This means some kind of offset is always required and you cannot use
4521 // an oop as the offset (as is done when addressing static globals).
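// (The "+4 offset" is how the second 32-bit half is addressed: the same memory
// operand is re-emitted with its displacement bumped by 4.  An oop used as the
// displacement cannot be biased that way, which is why indOffset32X is absent
// from this opclass.)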
4522 opclass long_memory(direct, indirect, indOffset8, indOffset32, indIndexOffset,
4523                     indIndex, indIndexScale, indIndexScaleOffset);
4524 
4525 
4526 //----------PIPELINE-----------------------------------------------------------
4527 // Rules which define the behavior of the target architecture's pipeline.
4528 pipeline %{
4529 
4530 //----------ATTRIBUTES---------------------------------------------------------
4531 attributes %{
4532   variable_size_instructions;        // Variable size instructions
4533   max_instructions_per_bundle = 3;   // Up to 3 instructions per bundle
4534   instruction_unit_size = 1;         // An instruction is 1 byte long
4535   instruction_fetch_unit_size = 16;  // The processor fetches one line
4536   instruction_fetch_units = 1;       // of 16 bytes
4537 
4538   // List of nop instructions
4539   nops( MachNop );
4540 %}
4541 
4542 //----------RESOURCES----------------------------------------------------------
4543 // Resources are the functional units available to the machine
4544 
4545 // Generic P2/P3 pipeline
4546 // 3 decoders, only D0 handles big operands; a "bundle" is the limit of
4547 // 3 instructions decoded per cycle.
4548 // 2 load/store ops per cycle, 1 branch, 1 FPU,
4549 // 2 ALU ops, only ALU0 handles mul/div instructions.
4550 resources( D0, D1, D2, DECODE = D0 | D1 | D2,
4551            MS0, MS1, MEM = MS0 | MS1,
4552            BR, FPU,
4553            ALU0, ALU1, ALU = ALU0 | ALU1 );
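     // A pipe class that names a combined resource (e.g. "DECODE : S0") may use any
     // of its member units, while naming a single unit (e.g. "D0 : S0" or "ALU0 : S3")
     // restricts the instruction to that unit.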
4554 
4555 //----------PIPELINE DESCRIPTION-----------------------------------------------
4556 // Pipeline Description specifies the stages in the machine's pipeline
4557 
4558 // Generic P2/P3 pipeline
4559 pipe_desc(S0, S1, S2, S3, S4, S5);
4560 
4561 //----------PIPELINE CLASSES---------------------------------------------------
4562 // Pipeline Classes describe the stages in which input and output are
4563 // referenced by the hardware pipeline.
4564 
4565 // Naming convention: ialu or fpu
4566 // Then: _reg
4567 // Then: _reg if there is a 2nd register
4568 // Then: _long if it's a pair of instructions implementing a long
4569 // Then: _fat if it requires the big decoder
4570 //   Or: _mem if it requires the big decoder and a memory unit.
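     // For example, ialu_reg_mem below names an integer ALU operation with a register
     // destination and a memory source, so it occupies the big decoder (D0), an ALU,
     // and a memory unit; fpu_reg_reg names an FPU operation on two registers.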
4571 
4572 // Integer ALU reg operation
4573 pipe_class ialu_reg(rRegI dst) %{
4574     single_instruction;
4575     dst    : S4(write);
4576     dst    : S3(read);
4577     DECODE : S0;        // any decoder
4578     ALU    : S3;        // any alu
4579 %}
4580 
4581 // Long ALU reg operation
4582 pipe_class ialu_reg_long(eRegL dst) %{
4583     instruction_count(2);
4584     dst    : S4(write);
4585     dst    : S3(read);
4586     DECODE : S0(2);     // any 2 decoders
4587     ALU    : S3(2);     // both alus
4588 %}
4589 
4590 // Integer ALU reg operation using big decoder
4591 pipe_class ialu_reg_fat(rRegI dst) %{
4592     single_instruction;
4593     dst    : S4(write);
4594     dst    : S3(read);
4595     D0     : S0;        // big decoder only
4596     ALU    : S3;        // any alu
4597 %}
4598 
4599 // Long ALU reg operation using big decoder
4600 pipe_class ialu_reg_long_fat(eRegL dst) %{
4601     instruction_count(2);
4602     dst    : S4(write);
4603     dst    : S3(read);
4604     D0     : S0(2);     // big decoder only; twice
4605     ALU    : S3(2);     // any 2 alus
4606 %}
4607 
4608 // Integer ALU reg-reg operation
4609 pipe_class ialu_reg_reg(rRegI dst, rRegI src) %{
4610     single_instruction;
4611     dst    : S4(write);
4612     src    : S3(read);
4613     DECODE : S0;        // any decoder
4614     ALU    : S3;        // any alu
4615 %}
4616 
4617 // Long ALU reg-reg operation
4618 pipe_class ialu_reg_reg_long(eRegL dst, eRegL src) %{
4619     instruction_count(2);
4620     dst    : S4(write);
4621     src    : S3(read);
4622     DECODE : S0(2);     // any 2 decoders
4623     ALU    : S3(2);     // both alus
4624 %}
4625 
4626 // Integer ALU reg-reg operation using big decoder
4627 pipe_class ialu_reg_reg_fat(rRegI dst, memory src) %{
4628     single_instruction;
4629     dst    : S4(write);
4630     src    : S3(read);
4631     D0     : S0;        // big decoder only
4632     ALU    : S3;        // any alu
4633 %}
4634 
4635 // Long ALU reg-reg operation using big decoder
4636 pipe_class ialu_reg_reg_long_fat(eRegL dst, eRegL src) %{
4637     instruction_count(2);
4638     dst    : S4(write);
4639     src    : S3(read);
4640     D0     : S0(2);     // big decoder only; twice
4641     ALU    : S3(2);     // both alus
4642 %}
4643 
4644 // Integer ALU reg-mem operation
4645 pipe_class ialu_reg_mem(rRegI dst, memory mem) %{
4646     single_instruction;
4647     dst    : S5(write);
4648     mem    : S3(read);
4649     D0     : S0;        // big decoder only
4650     ALU    : S4;        // any alu
4651     MEM    : S3;        // any mem
4652 %}
4653 
4654 // Long ALU reg-mem operation
4655 pipe_class ialu_reg_long_mem(eRegL dst, load_long_memory mem) %{
4656     instruction_count(2);
4657     dst    : S5(write);
4658     mem    : S3(read);
4659     D0     : S0(2);     // big decoder only; twice
4660     ALU    : S4(2);     // any 2 alus
4661     MEM    : S3(2);     // both mems
4662 %}
4663 
4664 // Integer mem operation (prefetch)
4665 pipe_class ialu_mem(memory mem)
4666 %{
4667     single_instruction;
4668     mem    : S3(read);
4669     D0     : S0;        // big decoder only
4670     MEM    : S3;        // any mem
4671 %}
4672 
4673 // Integer Store to Memory
4674 pipe_class ialu_mem_reg(memory mem, rRegI src) %{
4675     single_instruction;
4676     mem    : S3(read);
4677     src    : S5(read);
4678     D0     : S0;        // big decoder only
4679     ALU    : S4;        // any alu
4680     MEM    : S3;
4681 %}
4682 
4683 // Long Store to Memory
4684 pipe_class ialu_mem_long_reg(memory mem, eRegL src) %{
4685     instruction_count(2);
4686     mem    : S3(read);
4687     src    : S5(read);
4688     D0     : S0(2);     // big decoder only; twice
4689     ALU    : S4(2);     // any 2 alus
4690     MEM    : S3(2);     // Both mems
4691 %}
4692 
4693 // Integer Store to Memory
4694 pipe_class ialu_mem_imm(memory mem) %{
4695     single_instruction;
4696     mem    : S3(read);
4697     D0     : S0;        // big decoder only
4698     ALU    : S4;        // any alu
4699     MEM    : S3;
4700 %}
4701 
4702 // Integer ALU0 reg-reg operation
4703 pipe_class ialu_reg_reg_alu0(rRegI dst, rRegI src) %{
4704     single_instruction;
4705     dst    : S4(write);
4706     src    : S3(read);
4707     D0     : S0;        // Big decoder only
4708     ALU0   : S3;        // only alu0
4709 %}
4710 
4711 // Integer ALU0 reg-mem operation
4712 pipe_class ialu_reg_mem_alu0(rRegI dst, memory mem) %{
4713     single_instruction;
4714     dst    : S5(write);
4715     mem    : S3(read);
4716     D0     : S0;        // big decoder only
4717     ALU0   : S4;        // ALU0 only
4718     MEM    : S3;        // any mem
4719 %}
4720 
4721 // Integer ALU reg-reg operation
4722 pipe_class ialu_cr_reg_reg(eFlagsReg cr, rRegI src1, rRegI src2) %{
4723     single_instruction;
4724     cr     : S4(write);
4725     src1   : S3(read);
4726     src2   : S3(read);
4727     DECODE : S0;        // any decoder
4728     ALU    : S3;        // any alu
4729 %}
4730 
4731 // Integer ALU reg-imm operation
4732 pipe_class ialu_cr_reg_imm(eFlagsReg cr, rRegI src1) %{
4733     single_instruction;
4734     cr     : S4(write);
4735     src1   : S3(read);
4736     DECODE : S0;        // any decoder
4737     ALU    : S3;        // any alu
4738 %}
4739 
4740 // Integer ALU reg-mem operation
4741 pipe_class ialu_cr_reg_mem(eFlagsReg cr, rRegI src1, memory src2) %{
4742     single_instruction;
4743     cr     : S4(write);
4744     src1   : S3(read);
4745     src2   : S3(read);
4746     D0     : S0;        // big decoder only
4747     ALU    : S4;        // any alu
4748     MEM    : S3;
4749 %}
4750 
4751 // Conditional move reg-reg
4752 pipe_class pipe_cmplt( rRegI p, rRegI q, rRegI y ) %{
4753     instruction_count(4);
4754     y      : S4(read);
4755     q      : S3(read);
4756     p      : S3(read);
4757     DECODE : S0(4);     // any decoder
4758 %}
4759 
4760 // Conditional move reg-reg
4761 pipe_class pipe_cmov_reg( rRegI dst, rRegI src, eFlagsReg cr ) %{
4762     single_instruction;
4763     dst    : S4(write);
4764     src    : S3(read);
4765     cr     : S3(read);
4766     DECODE : S0;        // any decoder
4767 %}
4768 
4769 // Conditional move reg-mem
4770 pipe_class pipe_cmov_mem( eFlagsReg cr, rRegI dst, memory src) %{
4771     single_instruction;
4772     dst    : S4(write);
4773     src    : S3(read);
4774     cr     : S3(read);
4775     DECODE : S0;        // any decoder
4776     MEM    : S3;
4777 %}
4778 
4779 // Conditional move reg-reg long
4780 pipe_class pipe_cmov_reg_long( eFlagsReg cr, eRegL dst, eRegL src) %{
4781     single_instruction;
4782     dst    : S4(write);
4783     src    : S3(read);
4784     cr     : S3(read);
4785     DECODE : S0(2);     // any 2 decoders
4786 %}
4787 
4788 // Conditional move double reg-reg
4789 pipe_class pipe_cmovDPR_reg( eFlagsReg cr, regDPR1 dst, regDPR src) %{
4790     single_instruction;
4791     dst    : S4(write);
4792     src    : S3(read);
4793     cr     : S3(read);
4794     DECODE : S0;        // any decoder
4795 %}
4796 
4797 // Float reg-reg operation
4798 pipe_class fpu_reg(regDPR dst) %{
4799     instruction_count(2);
4800     dst    : S3(read);
4801     DECODE : S0(2);     // any 2 decoders
4802     FPU    : S3;
4803 %}
4804 
4805 // Float reg-reg operation
4806 pipe_class fpu_reg_reg(regDPR dst, regDPR src) %{
4807     instruction_count(2);
4808     dst    : S4(write);
4809     src    : S3(read);
4810     DECODE : S0(2);     // any 2 decoders
4811     FPU    : S3;
4812 %}
4813 
4814 // Float reg-reg operation
4815 pipe_class fpu_reg_reg_reg(regDPR dst, regDPR src1, regDPR src2) %{
4816     instruction_count(3);
4817     dst    : S4(write);
4818     src1   : S3(read);
4819     src2   : S3(read);
4820     DECODE : S0(3);     // any 3 decoders
4821     FPU    : S3(2);
4822 %}
4823 
4824 // Float reg-reg operation
4825 pipe_class fpu_reg_reg_reg_reg(regDPR dst, regDPR src1, regDPR src2, regDPR src3) %{
4826     instruction_count(4);
4827     dst    : S4(write);
4828     src1   : S3(read);
4829     src2   : S3(read);
4830     src3   : S3(read);
4831     DECODE : S0(4);     // any decoders; 4 decode slots
4832     FPU    : S3(2);
4833 %}
4834 
4835 // Float reg-reg operation
4836 pipe_class fpu_reg_mem_reg_reg(regDPR dst, memory src1, regDPR src2, regDPR src3) %{
4837     instruction_count(4);
4838     dst    : S4(write);
4839     src1   : S3(read);
4840     src2   : S3(read);
4841     src3   : S3(read);
4842     DECODE : S1(3);     // any 3 decoders
4843     D0     : S0;        // Big decoder only
4844     FPU    : S3(2);
4845     MEM    : S3;
4846 %}
4847 
4848 // Float reg-mem operation
4849 pipe_class fpu_reg_mem(regDPR dst, memory mem) %{
4850     instruction_count(2);
4851     dst    : S5(write);
4852     mem    : S3(read);
4853     D0     : S0;        // big decoder only
4854     DECODE : S1;        // any decoder for FPU POP
4855     FPU    : S4;
4856     MEM    : S3;        // any mem
4857 %}
4858 
4859 // Float reg-mem operation
4860 pipe_class fpu_reg_reg_mem(regDPR dst, regDPR src1, memory mem) %{
4861     instruction_count(3);
4862     dst    : S5(write);
4863     src1   : S3(read);
4864     mem    : S3(read);
4865     D0     : S0;        // big decoder only
4866     DECODE : S1(2);     // any decoder for FPU POP
4867     FPU    : S4;
4868     MEM    : S3;        // any mem
4869 %}
4870 
4871 // Float mem-reg operation
4872 pipe_class fpu_mem_reg(memory mem, regDPR src) %{
4873     instruction_count(2);
4874     src    : S5(read);
4875     mem    : S3(read);
4876     DECODE : S0;        // any decoder for FPU PUSH
4877     D0     : S1;        // big decoder only
4878     FPU    : S4;
4879     MEM    : S3;        // any mem
4880 %}
4881 
4882 pipe_class fpu_mem_reg_reg(memory mem, regDPR src1, regDPR src2) %{
4883     instruction_count(3);
4884     src1   : S3(read);
4885     src2   : S3(read);
4886     mem    : S3(read);
4887     DECODE : S0(2);     // any decoder for FPU PUSH
4888     D0     : S1;        // big decoder only
4889     FPU    : S4;
4890     MEM    : S3;        // any mem
4891 %}
4892 
4893 pipe_class fpu_mem_reg_mem(memory mem, regDPR src1, memory src2) %{
4894     instruction_count(3);
4895     src1   : S3(read);
4896     src2   : S3(read);
4897     mem    : S4(read);
4898     DECODE : S0;        // any decoder for FPU PUSH
4899     D0     : S0(2);     // big decoder only
4900     FPU    : S4;
4901     MEM    : S3(2);     // any mem
4902 %}
4903 
4904 pipe_class fpu_mem_mem(memory dst, memory src1) %{
4905     instruction_count(2);
4906     src1   : S3(read);
4907     dst    : S4(read);
4908     D0     : S0(2);     // big decoder only
4909     MEM    : S3(2);     // any mem
4910 %}
4911 
4912 pipe_class fpu_mem_mem_mem(memory dst, memory src1, memory src2) %{
4913     instruction_count(3);
4914     src1   : S3(read);
4915     src2   : S3(read);
4916     dst    : S4(read);
4917     D0     : S0(3);     // big decoder only
4918     FPU    : S4;
4919     MEM    : S3(3);     // any mem
4920 %}
4921 
4922 pipe_class fpu_mem_reg_con(memory mem, regDPR src1) %{
4923     instruction_count(3);
4924     src1   : S4(read);
4925     mem    : S4(read);
4926     DECODE : S0;        // any decoder for FPU PUSH
4927     D0     : S0(2);     // big decoder only
4928     FPU    : S4;
4929     MEM    : S3(2);     // any mem
4930 %}
4931 
4932 // Float load constant
4933 pipe_class fpu_reg_con(regDPR dst) %{
4934     instruction_count(2);
4935     dst    : S5(write);
4936     D0     : S0;        // big decoder only for the load
4937     DECODE : S1;        // any decoder for FPU POP
4938     FPU    : S4;
4939     MEM    : S3;        // any mem
4940 %}
4941 
4942 // Float load constant
4943 pipe_class fpu_reg_reg_con(regDPR dst, regDPR src) %{
4944     instruction_count(3);
4945     dst    : S5(write);
4946     src    : S3(read);
4947     D0     : S0;        // big decoder only for the load
4948     DECODE : S1(2);     // any decoder for FPU POP
4949     FPU    : S4;
4950     MEM    : S3;        // any mem
4951 %}
4952 
4953 // Unconditional branch
4954 pipe_class pipe_jmp( label labl ) %{
4955     single_instruction;
4956     BR   : S3;
4957 %}
4958 
4959 // Conditional branch
4960 pipe_class pipe_jcc( cmpOp cmp, eFlagsReg cr, label labl ) %{
4961     single_instruction;
4962     cr    : S1(read);
4963     BR    : S3;
4964 %}
4965 
4966 // Allocation idiom
4967 pipe_class pipe_cmpxchg( eRegP dst, eRegP heap_ptr ) %{
4968     instruction_count(1); force_serialization;
4969     fixed_latency(6);
4970     heap_ptr : S3(read);
4971     DECODE   : S0(3);
4972     D0       : S2;
4973     MEM      : S3;
4974     ALU      : S3(2);
4975     dst      : S5(write);
4976     BR       : S5;
4977 %}
4978 
4979 // Generic big/slow expanded idiom
4980 pipe_class pipe_slow(  ) %{
4981     instruction_count(10); multiple_bundles; force_serialization;
4982     fixed_latency(100);
4983     D0  : S0(2);
4984     MEM : S3(2);
4985 %}
4986 
4987 // The real do-nothing guy
4988 pipe_class empty( ) %{
4989     instruction_count(0);
4990 %}
4991 
4992 // Define the class for the Nop node
4993 define %{
4994    MachNop = empty;
4995 %}
4996 
4997 %}
4998 
4999 //----------INSTRUCTIONS-------------------------------------------------------
5000 //
5001 // match      -- States which machine-independent subtree may be replaced
5002 //               by this instruction.
5003 // ins_cost   -- The estimated cost of this instruction is used by instruction
5004 //               selection to identify a minimum cost tree of machine
5005 //               instructions that matches a tree of machine-independent
5006 //               instructions.
5007 // format     -- A string providing the disassembly for this instruction.
5008 //               The value of an instruction's operand may be inserted
5009 //               by referring to it with a '$' prefix.
5010 // opcode     -- Three instruction opcodes may be provided.  These are referred
5011 //               to within an encode class as $primary, $secondary, and $tertiary
5012 //               respectively.  The primary opcode is commonly used to
5013 //               indicate the type of machine instruction, while secondary
5014 //               and tertiary are often used for prefix options or addressing
5015 //               modes.
5016 // ins_encode -- A list of encode classes with parameters. The encode class
5017 //               name must have been defined in an 'enc_class' specification
5018 //               in the encode section of the architecture description.
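     // For example, bytes_reverse_int below matches (Set dst (ReverseBytesI dst)),
     // supplies 0x0F and 0xC8 through opcode() as $primary and $secondary, and emits
     // them via the OpcP and OpcSReg encode classes.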
5019 
5020 //----------BSWAP-Instruction--------------------------------------------------
5021 instruct bytes_reverse_int(rRegI dst) %{
5022   match(Set dst (ReverseBytesI dst));
5023 
5024   format %{ "BSWAP  $dst" %}
5025   opcode(0x0F, 0xC8);
5026   ins_encode( OpcP, OpcSReg(dst) );
5027   ins_pipe( ialu_reg );
5028 %}
5029 
5030 instruct bytes_reverse_long(eRegL dst) %{
5031   match(Set dst (ReverseBytesL dst));
5032 
5033   format %{ "BSWAP  $dst.lo\n\t"
5034             "BSWAP  $dst.hi\n\t"
5035             "XCHG   $dst.lo $dst.hi" %}
5036 
5037   ins_cost(125);
5038   ins_encode( bswap_long_bytes(dst) );
5039   ins_pipe( ialu_reg_reg);
5040 %}
5041 
5042 instruct bytes_reverse_unsigned_short(rRegI dst, eFlagsReg cr) %{
5043   match(Set dst (ReverseBytesUS dst));
5044   effect(KILL cr);
5045 
5046   format %{ "BSWAP  $dst\n\t" 
5047             "SHR    $dst,16\n\t" %}
5048   ins_encode %{
5049     __ bswapl($dst$$Register);
5050     __ shrl($dst$$Register, 16); 
5051   %}
5052   ins_pipe( ialu_reg );
5053 %}
5054 
5055 instruct bytes_reverse_short(rRegI dst, eFlagsReg cr) %{
5056   match(Set dst (ReverseBytesS dst));
5057   effect(KILL cr);
5058 
5059   format %{ "BSWAP  $dst\n\t" 
5060             "SAR    $dst,16\n\t" %}
5061   ins_encode %{
5062     __ bswapl($dst$$Register);
5063     __ sarl($dst$$Register, 16); 
5064   %}
5065   ins_pipe( ialu_reg );
5066 %}
5067 
5068 
5069 //---------- Zeros Count Instructions ------------------------------------------
5070 
5071 instruct countLeadingZerosI(rRegI dst, rRegI src, eFlagsReg cr) %{
5072   predicate(UseCountLeadingZerosInstruction);
5073   match(Set dst (CountLeadingZerosI src));
5074   effect(KILL cr);
5075 
5076   format %{ "LZCNT  $dst, $src\t# count leading zeros (int)" %}
5077   ins_encode %{
5078     __ lzcntl($dst$$Register, $src$$Register);
5079   %}
5080   ins_pipe(ialu_reg);
5081 %}
5082 
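     // Without LZCNT, leading zeros are derived from BSR: for a non-zero input BSR
     // yields the index of the highest set bit, so CLZ(x) = 31 - BSR(x), computed
     // below as NEG then ADD 31.  A zero input (BSR sets ZF) is patched to -1 so the
     // same sequence produces 32.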
5083 instruct countLeadingZerosI_bsr(rRegI dst, rRegI src, eFlagsReg cr) %{
5084   predicate(!UseCountLeadingZerosInstruction);
5085   match(Set dst (CountLeadingZerosI src));
5086   effect(KILL cr);
5087 
5088   format %{ "BSR    $dst, $src\t# count leading zeros (int)\n\t"
5089             "JNZ    skip\n\t"
5090             "MOV    $dst, -1\n"
5091       "skip:\n\t"
5092             "NEG    $dst\n\t"
5093             "ADD    $dst, 31" %}
5094   ins_encode %{
5095     Register Rdst = $dst$$Register;
5096     Register Rsrc = $src$$Register;
5097     Label skip;
5098     __ bsrl(Rdst, Rsrc);
5099     __ jccb(Assembler::notZero, skip);
5100     __ movl(Rdst, -1);
5101     __ bind(skip);
5102     __ negl(Rdst);
5103     __ addl(Rdst, BitsPerInt - 1);
5104   %}
5105   ins_pipe(ialu_reg);
5106 %}
5107 
5108 instruct countLeadingZerosL(rRegI dst, eRegL src, eFlagsReg cr) %{
5109   predicate(UseCountLeadingZerosInstruction);
5110   match(Set dst (CountLeadingZerosL src));
5111   effect(TEMP dst, KILL cr);
5112 
5113   format %{ "LZCNT  $dst, $src.hi\t# count leading zeros (long)\n\t"
5114             "JNC    done\n\t"
5115             "LZCNT  $dst, $src.lo\n\t"
5116             "ADD    $dst, 32\n"
5117       "done:" %}
5118   ins_encode %{
5119     Register Rdst = $dst$$Register;
5120     Register Rsrc = $src$$Register;
5121     Label done;
5122     __ lzcntl(Rdst, HIGH_FROM_LOW(Rsrc));
5123     __ jccb(Assembler::carryClear, done);
5124     __ lzcntl(Rdst, Rsrc);
5125     __ addl(Rdst, BitsPerInt);
5126     __ bind(done);
5127   %}
5128   ins_pipe(ialu_reg);
5129 %}
5130 
5131 instruct countLeadingZerosL_bsr(rRegI dst, eRegL src, eFlagsReg cr) %{
5132   predicate(!UseCountLeadingZerosInstruction);
5133   match(Set dst (CountLeadingZerosL src));
5134   effect(TEMP dst, KILL cr);
5135 
5136   format %{ "BSR    $dst, $src.hi\t# count leading zeros (long)\n\t"
5137             "JZ     msw_is_zero\n\t"
5138             "ADD    $dst, 32\n\t"
5139             "JMP    not_zero\n"
5140       "msw_is_zero:\n\t"
5141             "BSR    $dst, $src.lo\n\t"
5142             "JNZ    not_zero\n\t"
5143             "MOV    $dst, -1\n"
5144       "not_zero:\n\t"
5145             "NEG    $dst\n\t"
5146             "ADD    $dst, 63\n" %}
5147   ins_encode %{
5148     Register Rdst = $dst$$Register;
5149     Register Rsrc = $src$$Register;
5150     Label msw_is_zero;
5151     Label not_zero;
5152     __ bsrl(Rdst, HIGH_FROM_LOW(Rsrc));
5153     __ jccb(Assembler::zero, msw_is_zero);
5154     __ addl(Rdst, BitsPerInt);
5155     __ jmpb(not_zero);
5156     __ bind(msw_is_zero);
5157     __ bsrl(Rdst, Rsrc);
5158     __ jccb(Assembler::notZero, not_zero);
5159     __ movl(Rdst, -1);
5160     __ bind(not_zero);
5161     __ negl(Rdst);
5162     __ addl(Rdst, BitsPerLong - 1);
5163   %}
5164   ins_pipe(ialu_reg);
5165 %}
5166 
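     // Trailing zeros use BSF, which returns the index of the lowest set bit and is
     // therefore exactly CTZ for a non-zero input; a zero input (ZF set) is patched
     // to 32, and the long form falls back to the high word plus 32.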
5167 instruct countTrailingZerosI(rRegI dst, rRegI src, eFlagsReg cr) %{
5168   match(Set dst (CountTrailingZerosI src));
5169   effect(KILL cr);
5170 
5171   format %{ "BSF    $dst, $src\t# count trailing zeros (int)\n\t"
5172             "JNZ    done\n\t"
5173             "MOV    $dst, 32\n"
5174       "done:" %}
5175   ins_encode %{
5176     Register Rdst = $dst$$Register;
5177     Label done;
5178     __ bsfl(Rdst, $src$$Register);
5179     __ jccb(Assembler::notZero, done);
5180     __ movl(Rdst, BitsPerInt);
5181     __ bind(done);
5182   %}
5183   ins_pipe(ialu_reg);
5184 %}
5185 
5186 instruct countTrailingZerosL(rRegI dst, eRegL src, eFlagsReg cr) %{
5187   match(Set dst (CountTrailingZerosL src));
5188   effect(TEMP dst, KILL cr);
5189 
5190   format %{ "BSF    $dst, $src.lo\t# count trailing zeros (long)\n\t"
5191             "JNZ    done\n\t"
5192             "BSF    $dst, $src.hi\n\t"
5193             "JNZ    msw_not_zero\n\t"
5194             "MOV    $dst, 32\n"
5195       "msw_not_zero:\n\t"
5196             "ADD    $dst, 32\n"
5197       "done:" %}
5198   ins_encode %{
5199     Register Rdst = $dst$$Register;
5200     Register Rsrc = $src$$Register;
5201     Label msw_not_zero;
5202     Label done;
5203     __ bsfl(Rdst, Rsrc);
5204     __ jccb(Assembler::notZero, done);
5205     __ bsfl(Rdst, HIGH_FROM_LOW(Rsrc));
5206     __ jccb(Assembler::notZero, msw_not_zero);
5207     __ movl(Rdst, BitsPerInt);
5208     __ bind(msw_not_zero);
5209     __ addl(Rdst, BitsPerInt);
5210     __ bind(done);
5211   %}
5212   ins_pipe(ialu_reg);
5213 %}
5214 
5215 
5216 //---------- Population Count Instructions -------------------------------------
5217 
5218 instruct popCountI(rRegI dst, rRegI src, eFlagsReg cr) %{
5219   predicate(UsePopCountInstruction);
5220   match(Set dst (PopCountI src));
5221   effect(KILL cr);
5222 
5223   format %{ "POPCNT $dst, $src" %}
5224   ins_encode %{
5225     __ popcntl($dst$$Register, $src$$Register);
5226   %}
5227   ins_pipe(ialu_reg);
5228 %}
5229 
5230 instruct popCountI_mem(rRegI dst, memory mem, eFlagsReg cr) %{
5231   predicate(UsePopCountInstruction);
5232   match(Set dst (PopCountI (LoadI mem)));
5233   effect(KILL cr);
5234 
5235   format %{ "POPCNT $dst, $mem" %}
5236   ins_encode %{
5237     __ popcntl($dst$$Register, $mem$$Address);
5238   %}
5239   ins_pipe(ialu_reg);
5240 %}
5241 
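     // The long variants below compute a 64-bit population count as the sum of the
     // POPCNTs of the two 32-bit halves.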
5242 // Note: Long.bitCount(long) returns an int.
5243 instruct popCountL(rRegI dst, eRegL src, rRegI tmp, eFlagsReg cr) %{
5244   predicate(UsePopCountInstruction);
5245   match(Set dst (PopCountL src));
5246   effect(KILL cr, TEMP tmp, TEMP dst);
5247 
5248   format %{ "POPCNT $dst, $src.lo\n\t"
5249             "POPCNT $tmp, $src.hi\n\t"
5250             "ADD    $dst, $tmp" %}
5251   ins_encode %{
5252     __ popcntl($dst$$Register, $src$$Register);
5253     __ popcntl($tmp$$Register, HIGH_FROM_LOW($src$$Register));
5254     __ addl($dst$$Register, $tmp$$Register);
5255   %}
5256   ins_pipe(ialu_reg);
5257 %}
5258 
5259 // Note: Long.bitCount(long) returns an int.
5260 instruct popCountL_mem(rRegI dst, memory mem, rRegI tmp, eFlagsReg cr) %{
5261   predicate(UsePopCountInstruction);
5262   match(Set dst (PopCountL (LoadL mem)));
5263   effect(KILL cr, TEMP tmp, TEMP dst);
5264 
5265   format %{ "POPCNT $dst, $mem\n\t"
5266             "POPCNT $tmp, $mem+4\n\t"
5267             "ADD    $dst, $tmp" %}
5268   ins_encode %{
5269     //__ popcntl($dst$$Register, $mem$$Address$$first);
5270     //__ popcntl($tmp$$Register, $mem$$Address$$second);
5271     __ popcntl($dst$$Register, Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none));
5272     __ popcntl($tmp$$Register, Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none));
5273     __ addl($dst$$Register, $tmp$$Register);
5274   %}
5275   ins_pipe(ialu_reg);
5276 %}
5277 
5278 
5279 //----------Load/Store/Move Instructions---------------------------------------
5280 //----------Load Instructions--------------------------------------------------
5281 // Load Byte (8bit signed)
5282 instruct loadB(xRegI dst, memory mem) %{
5283   match(Set dst (LoadB mem));
5284 
5285   ins_cost(125);
5286   format %{ "MOVSX8 $dst,$mem\t# byte" %}
5287 
5288   ins_encode %{
5289     __ movsbl($dst$$Register, $mem$$Address);
5290   %}
5291 
5292   ins_pipe(ialu_reg_mem);
5293 %}
5294 
5295 // Load Byte (8bit signed) into Long Register
5296 instruct loadB2L(eRegL dst, memory mem, eFlagsReg cr) %{
5297   match(Set dst (ConvI2L (LoadB mem)));
5298   effect(KILL cr);
5299 
5300   ins_cost(375);
5301   format %{ "MOVSX8 $dst.lo,$mem\t# byte -> long\n\t"
5302             "MOV    $dst.hi,$dst.lo\n\t"
5303             "SAR    $dst.hi,7" %}
5304 
5305   ins_encode %{
5306     __ movsbl($dst$$Register, $mem$$Address);
5307     __ movl(HIGH_FROM_LOW($dst$$Register), $dst$$Register); // This is always a different register.
5308     __ sarl(HIGH_FROM_LOW($dst$$Register), 7); // 24+1 MSBs are already sign-extended.
5309   %}
5310 
5311   ins_pipe(ialu_reg_mem);
5312 %}
5313 
5314 // Load Unsigned Byte (8bit UNsigned)
5315 instruct loadUB(xRegI dst, memory mem) %{
5316   match(Set dst (LoadUB mem));
5317 
5318   ins_cost(125);
5319   format %{ "MOVZX8 $dst,$mem\t# ubyte -> int" %}
5320 
5321   ins_encode %{
5322     __ movzbl($dst$$Register, $mem$$Address);
5323   %}
5324 
5325   ins_pipe(ialu_reg_mem);
5326 %}
5327 
5328 // Load Unsigned Byte (8 bit UNsigned) into Long Register
5329 instruct loadUB2L(eRegL dst, memory mem, eFlagsReg cr) %{
5330   match(Set dst (ConvI2L (LoadUB mem)));
5331   effect(KILL cr);
5332 
5333   ins_cost(250);
5334   format %{ "MOVZX8 $dst.lo,$mem\t# ubyte -> long\n\t"
5335             "XOR    $dst.hi,$dst.hi" %}
5336 
5337   ins_encode %{
5338     Register Rdst = $dst$$Register;
5339     __ movzbl(Rdst, $mem$$Address);
5340     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5341   %}
5342 
5343   ins_pipe(ialu_reg_mem);
5344 %}
5345 
5346 // Load Unsigned Byte (8 bit UNsigned) with mask into Long Register
5347 instruct loadUB2L_immI8(eRegL dst, memory mem, immI8 mask, eFlagsReg cr) %{
5348   match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
5349   effect(KILL cr);
5350 
5351   format %{ "MOVZX8 $dst.lo,$mem\t# ubyte & 8-bit mask -> long\n\t"
5352             "XOR    $dst.hi,$dst.hi\n\t"
5353             "AND    $dst.lo,$mask" %}
5354   ins_encode %{
5355     Register Rdst = $dst$$Register;
5356     __ movzbl(Rdst, $mem$$Address);
5357     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5358     __ andl(Rdst, $mask$$constant);
5359   %}
5360   ins_pipe(ialu_reg_mem);
5361 %}
5362 
5363 // Load Short (16bit signed)
5364 instruct loadS(rRegI dst, memory mem) %{
5365   match(Set dst (LoadS mem));
5366 
5367   ins_cost(125);
5368   format %{ "MOVSX  $dst,$mem\t# short" %}
5369 
5370   ins_encode %{
5371     __ movswl($dst$$Register, $mem$$Address);
5372   %}
5373 
5374   ins_pipe(ialu_reg_mem);
5375 %}
5376 
5377 // Load Short (16 bit signed) to Byte (8 bit signed)
5378 instruct loadS2B(rRegI dst, memory mem, immI_24 twentyfour) %{
5379   match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
5380 
5381   ins_cost(125);
5382   format %{ "MOVSX  $dst, $mem\t# short -> byte" %}
5383   ins_encode %{
5384     __ movsbl($dst$$Register, $mem$$Address);
5385   %}
5386   ins_pipe(ialu_reg_mem);
5387 %}
5388 
5389 // Load Short (16bit signed) into Long Register
5390 instruct loadS2L(eRegL dst, memory mem, eFlagsReg cr) %{
5391   match(Set dst (ConvI2L (LoadS mem)));
5392   effect(KILL cr);
5393 
5394   ins_cost(375);
5395   format %{ "MOVSX  $dst.lo,$mem\t# short -> long\n\t"
5396             "MOV    $dst.hi,$dst.lo\n\t"
5397             "SAR    $dst.hi,15" %}
5398 
5399   ins_encode %{
5400     __ movswl($dst$$Register, $mem$$Address);
5401     __ movl(HIGH_FROM_LOW($dst$$Register), $dst$$Register); // This is always a different register.
5402     __ sarl(HIGH_FROM_LOW($dst$$Register), 15); // 16+1 MSBs are already sign-extended.
5403   %}
5404 
5405   ins_pipe(ialu_reg_mem);
5406 %}
5407 
5408 // Load Unsigned Short/Char (16bit unsigned)
5409 instruct loadUS(rRegI dst, memory mem) %{
5410   match(Set dst (LoadUS mem));
5411 
5412   ins_cost(125);
5413   format %{ "MOVZX  $dst,$mem\t# ushort/char -> int" %}
5414 
5415   ins_encode %{
5416     __ movzwl($dst$$Register, $mem$$Address);
5417   %}
5418 
5419   ins_pipe(ialu_reg_mem);
5420 %}
5421 
5422 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
5423 instruct loadUS2B(rRegI dst, memory mem, immI_24 twentyfour) %{
5424   match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
5425 
5426   ins_cost(125);
5427   format %{ "MOVSX  $dst, $mem\t# ushort -> byte" %}
5428   ins_encode %{
5429     __ movsbl($dst$$Register, $mem$$Address);
5430   %}
5431   ins_pipe(ialu_reg_mem);
5432 %}
5433 
5434 // Load Unsigned Short/Char (16 bit UNsigned) into Long Register
5435 instruct loadUS2L(eRegL dst, memory mem, eFlagsReg cr) %{
5436   match(Set dst (ConvI2L (LoadUS mem)));
5437   effect(KILL cr);
5438 
5439   ins_cost(250);
5440   format %{ "MOVZX  $dst.lo,$mem\t# ushort/char -> long\n\t"
5441             "XOR    $dst.hi,$dst.hi" %}
5442 
5443   ins_encode %{
5444     __ movzwl($dst$$Register, $mem$$Address);
5445     __ xorl(HIGH_FROM_LOW($dst$$Register), HIGH_FROM_LOW($dst$$Register));
5446   %}
5447 
5448   ins_pipe(ialu_reg_mem);
5449 %}
5450 
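     // When the mask is 0xFF only the low byte can survive, so the AND is folded
     // into a zero-extending byte load (MOVZX8) in the variants below.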
5451 // Load Unsigned Short/Char (16 bit UNsigned) with mask 0xFF into Long Register
5452 instruct loadUS2L_immI_255(eRegL dst, memory mem, immI_255 mask, eFlagsReg cr) %{
5453   match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5454   effect(KILL cr);
5455 
5456   format %{ "MOVZX8 $dst.lo,$mem\t# ushort/char & 0xFF -> long\n\t"
5457             "XOR    $dst.hi,$dst.hi" %}
5458   ins_encode %{
5459     Register Rdst = $dst$$Register;
5460     __ movzbl(Rdst, $mem$$Address);
5461     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5462   %}
5463   ins_pipe(ialu_reg_mem);
5464 %}
5465 
5466 // Load Unsigned Short/Char (16 bit UNsigned) with a 16-bit mask into Long Register
5467 instruct loadUS2L_immI16(eRegL dst, memory mem, immI16 mask, eFlagsReg cr) %{
5468   match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5469   effect(KILL cr);
5470 
5471   format %{ "MOVZX  $dst.lo, $mem\t# ushort/char & 16-bit mask -> long\n\t"
5472             "XOR    $dst.hi,$dst.hi\n\t"
5473             "AND    $dst.lo,$mask" %}
5474   ins_encode %{
5475     Register Rdst = $dst$$Register;
5476     __ movzwl(Rdst, $mem$$Address);
5477     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5478     __ andl(Rdst, $mask$$constant);
5479   %}
5480   ins_pipe(ialu_reg_mem);
5481 %}
5482 
5483 // Load Integer
5484 instruct loadI(rRegI dst, memory mem) %{
5485   match(Set dst (LoadI mem));
5486 
5487   ins_cost(125);
5488   format %{ "MOV    $dst,$mem\t# int" %}
5489 
5490   ins_encode %{
5491     __ movl($dst$$Register, $mem$$Address);
5492   %}
5493 
5494   ins_pipe(ialu_reg_mem);
5495 %}
5496 
5497 // Load Integer (32 bit signed) to Byte (8 bit signed)
5498 instruct loadI2B(rRegI dst, memory mem, immI_24 twentyfour) %{
5499   match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5500 
5501   ins_cost(125);
5502   format %{ "MOVSX  $dst, $mem\t# int -> byte" %}
5503   ins_encode %{
5504     __ movsbl($dst$$Register, $mem$$Address);
5505   %}
5506   ins_pipe(ialu_reg_mem);
5507 %}
5508 
5509 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
5510 instruct loadI2UB(rRegI dst, memory mem, immI_255 mask) %{
5511   match(Set dst (AndI (LoadI mem) mask));
5512 
5513   ins_cost(125);
5514   format %{ "MOVZX  $dst, $mem\t# int -> ubyte" %}
5515   ins_encode %{
5516     __ movzbl($dst$$Register, $mem$$Address);
5517   %}
5518   ins_pipe(ialu_reg_mem);
5519 %}
5520 
5521 // Load Integer (32 bit signed) to Short (16 bit signed)
5522 instruct loadI2S(rRegI dst, memory mem, immI_16 sixteen) %{
5523   match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5524 
5525   ins_cost(125);
5526   format %{ "MOVSX  $dst, $mem\t# int -> short" %}
5527   ins_encode %{
5528     __ movswl($dst$$Register, $mem$$Address);
5529   %}
5530   ins_pipe(ialu_reg_mem);
5531 %}
5532 
5533 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
5534 instruct loadI2US(rRegI dst, memory mem, immI_65535 mask) %{
5535   match(Set dst (AndI (LoadI mem) mask));
5536 
5537   ins_cost(125);
5538   format %{ "MOVZX  $dst, $mem\t# int -> ushort/char" %}
5539   ins_encode %{
5540     __ movzwl($dst$$Register, $mem$$Address);
5541   %}
5542   ins_pipe(ialu_reg_mem);
5543 %}
5544 
5545 // Load Integer into Long Register
5546 instruct loadI2L(eRegL dst, memory mem, eFlagsReg cr) %{
5547   match(Set dst (ConvI2L (LoadI mem)));
5548   effect(KILL cr);
5549 
5550   ins_cost(375);
5551   format %{ "MOV    $dst.lo,$mem\t# int -> long\n\t"
5552             "MOV    $dst.hi,$dst.lo\n\t"
5553             "SAR    $dst.hi,31" %}
5554 
5555   ins_encode %{
5556     __ movl($dst$$Register, $mem$$Address);
5557     __ movl(HIGH_FROM_LOW($dst$$Register), $dst$$Register); // This is always a different register.
5558     __ sarl(HIGH_FROM_LOW($dst$$Register), 31);
5559   %}
5560 
5561   ins_pipe(ialu_reg_mem);
5562 %}
5563 
5564 // Load Integer with mask 0xFF into Long Register
5565 instruct loadI2L_immI_255(eRegL dst, memory mem, immI_255 mask, eFlagsReg cr) %{
5566   match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5567   effect(KILL cr);
5568 
5569   format %{ "MOVZX8 $dst.lo,$mem\t# int & 0xFF -> long\n\t"
5570             "XOR    $dst.hi,$dst.hi" %}
5571   ins_encode %{
5572     Register Rdst = $dst$$Register;
5573     __ movzbl(Rdst, $mem$$Address);
5574     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5575   %}
5576   ins_pipe(ialu_reg_mem);
5577 %}
5578 
5579 // Load Integer with mask 0xFFFF into Long Register
5580 instruct loadI2L_immI_65535(eRegL dst, memory mem, immI_65535 mask, eFlagsReg cr) %{
5581   match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5582   effect(KILL cr);
5583 
5584   format %{ "MOVZX  $dst.lo,$mem\t# int & 0xFFFF -> long\n\t"
5585             "XOR    $dst.hi,$dst.hi" %}
5586   ins_encode %{
5587     Register Rdst = $dst$$Register;
5588     __ movzwl(Rdst, $mem$$Address);
5589     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5590   %}
5591   ins_pipe(ialu_reg_mem);
5592 %}
5593 
5594 // Load Integer with 31-bit mask into Long Register
5595 instruct loadI2L_immU31(eRegL dst, memory mem, immU31 mask, eFlagsReg cr) %{
5596   match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5597   effect(KILL cr);
5598 
5599   format %{ "MOV    $dst.lo,$mem\t# int & 31-bit mask -> long\n\t"
5600             "XOR    $dst.hi,$dst.hi\n\t"
5601             "AND    $dst.lo,$mask" %}
5602   ins_encode %{
5603     Register Rdst = $dst$$Register;
5604     __ movl(Rdst, $mem$$Address);
5605     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5606     __ andl(Rdst, $mask$$constant);
5607   %}
5608   ins_pipe(ialu_reg_mem);
5609 %}
5610 
5611 // Load Unsigned Integer into Long Register
5612 instruct loadUI2L(eRegL dst, memory mem, immL_32bits mask, eFlagsReg cr) %{
5613   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
5614   effect(KILL cr);
5615 
5616   ins_cost(250);
5617   format %{ "MOV    $dst.lo,$mem\t# uint -> long\n\t"
5618             "XOR    $dst.hi,$dst.hi" %}
5619 
5620   ins_encode %{
5621     __ movl($dst$$Register, $mem$$Address);
5622     __ xorl(HIGH_FROM_LOW($dst$$Register), HIGH_FROM_LOW($dst$$Register));
5623   %}
5624 
5625   ins_pipe(ialu_reg_mem);
5626 %}
5627 
5628 // Load Long.  Cannot clobber address while loading, so restrict address
5629 // register to ESI
5630 instruct loadL(eRegL dst, load_long_memory mem) %{
5631   predicate(!((LoadLNode*)n)->require_atomic_access());
5632   match(Set dst (LoadL mem));
5633 
5634   ins_cost(250);
5635   format %{ "MOV    $dst.lo,$mem\t# long\n\t"
5636             "MOV    $dst.hi,$mem+4" %}
5637 
5638   ins_encode %{
5639     Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
5640     Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);
5641     __ movl($dst$$Register, Amemlo);
5642     __ movl(HIGH_FROM_LOW($dst$$Register), Amemhi);
5643   %}
5644 
5645   ins_pipe(ialu_reg_long_mem);
5646 %}
5647 
5648 // Volatile Load Long.  Must be atomic, so do 64-bit FILD
5649 // then store it down to the stack and reload on the int
5650 // side.
5651 instruct loadL_volatile(stackSlotL dst, memory mem) %{
5652   predicate(UseSSE<=1 && ((LoadLNode*)n)->require_atomic_access());
5653   match(Set dst (LoadL mem));
5654 
5655   ins_cost(200);
5656   format %{ "FILD   $mem\t# Atomic volatile long load\n\t"
5657             "FISTp  $dst" %}
5658   ins_encode(enc_loadL_volatile(mem,dst));
5659   ins_pipe( fpu_reg_mem );
5660 %}
5661 
5662 instruct loadLX_volatile(stackSlotL dst, memory mem, regD tmp) %{
5663   predicate(UseSSE>=2 && ((LoadLNode*)n)->require_atomic_access());
5664   match(Set dst (LoadL mem));
5665   effect(TEMP tmp);
5666   ins_cost(180);
5667   format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
5668             "MOVSD  $dst,$tmp" %}
5669   ins_encode %{
5670     __ movdbl($tmp$$XMMRegister, $mem$$Address);
5671     __ movdbl(Address(rsp, $dst$$disp), $tmp$$XMMRegister);
5672   %}
5673   ins_pipe( pipe_slow );
5674 %}
5675 
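     // Alternatively the 64-bit value is loaded atomically into an XMM register,
     // the low half is extracted with MOVD, and PSRLQ shifts the high half down so
     // a second MOVD can extract it into $dst.hi.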
5676 instruct loadLX_reg_volatile(eRegL dst, memory mem, regD tmp) %{
5677   predicate(UseSSE>=2 && ((LoadLNode*)n)->require_atomic_access());
5678   match(Set dst (LoadL mem));
5679   effect(TEMP tmp);
5680   ins_cost(160);
5681   format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
5682             "MOVD   $dst.lo,$tmp\n\t"
5683             "PSRLQ  $tmp,32\n\t"
5684             "MOVD   $dst.hi,$tmp" %}
5685   ins_encode %{
5686     __ movdbl($tmp$$XMMRegister, $mem$$Address);
5687     __ movdl($dst$$Register, $tmp$$XMMRegister);
5688     __ psrlq($tmp$$XMMRegister, 32);
5689     __ movdl(HIGH_FROM_LOW($dst$$Register), $tmp$$XMMRegister);
5690   %}
5691   ins_pipe( pipe_slow );
5692 %}
5693 
5694 // Load Range
5695 instruct loadRange(rRegI dst, memory mem) %{
5696   match(Set dst (LoadRange mem));
5697 
5698   ins_cost(125);
5699   format %{ "MOV    $dst,$mem" %}
5700   opcode(0x8B);
5701   ins_encode( OpcP, RegMem(dst,mem));
5702   ins_pipe( ialu_reg_mem );
5703 %}
5704 
5705 
5706 // Load Pointer
5707 instruct loadP(eRegP dst, memory mem) %{
5708   match(Set dst (LoadP mem));
5709 
5710   ins_cost(125);
5711   format %{ "MOV    $dst,$mem" %}
5712   opcode(0x8B);
5713   ins_encode( OpcP, RegMem(dst,mem));
5714   ins_pipe( ialu_reg_mem );
5715 %}
5716 
5717 // Load Klass Pointer
5718 instruct loadKlass(eRegP dst, memory mem) %{
5719   match(Set dst (LoadKlass mem));
5720 
5721   ins_cost(125);
5722   format %{ "MOV    $dst,$mem" %}
5723   opcode(0x8B);
5724   ins_encode( OpcP, RegMem(dst,mem));
5725   ins_pipe( ialu_reg_mem );
5726 %}
5727 
5728 // Load Double
5729 instruct loadDPR(regDPR dst, memory mem) %{
5730   predicate(UseSSE<=1);
5731   match(Set dst (LoadD mem));
5732 
5733   ins_cost(150);
5734   format %{ "FLD_D  ST,$mem\n\t"
5735             "FSTP   $dst" %}
5736   opcode(0xDD);               /* DD /0 */
5737   ins_encode( OpcP, RMopc_Mem(0x00,mem),
5738               Pop_Reg_DPR(dst) );
5739   ins_pipe( fpu_reg_mem );
5740 %}
5741 
5742 // Load Double to XMM
5743 instruct loadD(regD dst, memory mem) %{
5744   predicate(UseSSE>=2 && UseXmmLoadAndClearUpper);
5745   match(Set dst (LoadD mem));
5746   ins_cost(145);
5747   format %{ "MOVSD  $dst,$mem" %}
5748   ins_encode %{
5749     __ movdbl ($dst$$XMMRegister, $mem$$Address);
5750   %}
5751   ins_pipe( pipe_slow );
5752 %}
5753 
5754 instruct loadD_partial(regD dst, memory mem) %{
5755   predicate(UseSSE>=2 && !UseXmmLoadAndClearUpper);
5756   match(Set dst (LoadD mem));
5757   ins_cost(145);
5758   format %{ "MOVLPD $dst,$mem" %}
5759   ins_encode %{
5760     __ movdbl ($dst$$XMMRegister, $mem$$Address);
5761   %}
5762   ins_pipe( pipe_slow );
5763 %}
5764 
5765 // Load to XMM register (single-precision floating point)
5766 // MOVSS instruction
5767 instruct loadF(regF dst, memory mem) %{
5768   predicate(UseSSE>=1);
5769   match(Set dst (LoadF mem));
5770   ins_cost(145);
5771   format %{ "MOVSS  $dst,$mem" %}
5772   ins_encode %{
5773     __ movflt ($dst$$XMMRegister, $mem$$Address);
5774   %}
5775   ins_pipe( pipe_slow );
5776 %}
5777 
5778 // Load Float
5779 instruct loadFPR(regFPR dst, memory mem) %{
5780   predicate(UseSSE==0);
5781   match(Set dst (LoadF mem));
5782 
5783   ins_cost(150);
5784   format %{ "FLD_S  ST,$mem\n\t"
5785             "FSTP   $dst" %}
5786   opcode(0xD9);               /* D9 /0 */
5787   ins_encode( OpcP, RMopc_Mem(0x00,mem),
5788               Pop_Reg_FPR(dst) );
5789   ins_pipe( fpu_reg_mem );
5790 %}
5791 
5792 // Load Effective Address
5793 instruct leaP8(eRegP dst, indOffset8 mem) %{
5794   match(Set dst mem);
5795 
5796   ins_cost(110);
5797   format %{ "LEA    $dst,$mem" %}
5798   opcode(0x8D);
5799   ins_encode( OpcP, RegMem(dst,mem));
5800   ins_pipe( ialu_reg_reg_fat );
5801 %}
5802 
5803 instruct leaP32(eRegP dst, indOffset32 mem) %{
5804   match(Set dst mem);
5805 
5806   ins_cost(110);
5807   format %{ "LEA    $dst,$mem" %}
5808   opcode(0x8D);
5809   ins_encode( OpcP, RegMem(dst,mem));
5810   ins_pipe( ialu_reg_reg_fat );
5811 %}
5812 
5813 instruct leaPIdxOff(eRegP dst, indIndexOffset mem) %{
5814   match(Set dst mem);
5815 
5816   ins_cost(110);
5817   format %{ "LEA    $dst,$mem" %}
5818   opcode(0x8D);
5819   ins_encode( OpcP, RegMem(dst,mem));
5820   ins_pipe( ialu_reg_reg_fat );
5821 %}
5822 
5823 instruct leaPIdxScale(eRegP dst, indIndexScale mem) %{
5824   match(Set dst mem);
5825 
5826   ins_cost(110);
5827   format %{ "LEA    $dst,$mem" %}
5828   opcode(0x8D);
5829   ins_encode( OpcP, RegMem(dst,mem));
5830   ins_pipe( ialu_reg_reg_fat );
5831 %}
5832 
5833 instruct leaPIdxScaleOff(eRegP dst, indIndexScaleOffset mem) %{
5834   match(Set dst mem);
5835 
5836   ins_cost(110);
5837   format %{ "LEA    $dst,$mem" %}
5838   opcode(0x8D);
5839   ins_encode( OpcP, RegMem(dst,mem));
5840   ins_pipe( ialu_reg_reg_fat );
5841 %}
5842 
5843 // Load Constant
5844 instruct loadConI(rRegI dst, immI src) %{
5845   match(Set dst src);
5846 
5847   format %{ "MOV    $dst,$src" %}
5848   ins_encode( LdImmI(dst, src) );
5849   ins_pipe( ialu_reg_fat );
5850 %}
5851 
5852 // Load Constant zero
5853 instruct loadConI0(rRegI dst, immI0 src, eFlagsReg cr) %{
5854   match(Set dst src);
5855   effect(KILL cr);
5856 
5857   ins_cost(50);
5858   format %{ "XOR    $dst,$dst" %}
5859   opcode(0x33);  /* + rd */
5860   ins_encode( OpcP, RegReg( dst, dst ) );
5861   ins_pipe( ialu_reg );
5862 %}
5863 
5864 instruct loadConP(eRegP dst, immP src) %{
5865   match(Set dst src);
5866 
5867   format %{ "MOV    $dst,$src" %}
5868   opcode(0xB8);  /* + rd */
5869   ins_encode( LdImmP(dst, src) );
5870   ins_pipe( ialu_reg_fat );
5871 %}
5872 
5873 instruct loadConL(eRegL dst, immL src, eFlagsReg cr) %{
5874   match(Set dst src);
5875   effect(KILL cr);
5876   ins_cost(200);
5877   format %{ "MOV    $dst.lo,$src.lo\n\t"
5878             "MOV    $dst.hi,$src.hi" %}
5879   opcode(0xB8);
5880   ins_encode( LdImmL_Lo(dst, src), LdImmL_Hi(dst, src) );
5881   ins_pipe( ialu_reg_long_fat );
5882 %}
5883 
5884 instruct loadConL0(eRegL dst, immL0 src, eFlagsReg cr) %{
5885   match(Set dst src);
5886   effect(KILL cr);
5887   ins_cost(150);
5888   format %{ "XOR    $dst.lo,$dst.lo\n\t"
5889             "XOR    $dst.hi,$dst.hi" %}
5890   opcode(0x33,0x33);
5891   ins_encode( RegReg_Lo(dst,dst), RegReg_Hi(dst, dst) );
5892   ins_pipe( ialu_reg_long );
5893 %}
5894 
5895 // The instruction usage is guarded by predicate in operand immFPR().
5896 instruct loadConFPR(regFPR dst, immFPR con) %{
5897   match(Set dst con);
5898   ins_cost(125);
5899   format %{ "FLD_S  ST,[$constantaddress]\t# load from constant table: float=$con\n\t"
5900             "FSTP   $dst" %}
5901   ins_encode %{
5902     __ fld_s($constantaddress($con));
5903     __ fstp_d($dst$$reg);
5904   %}
5905   ins_pipe(fpu_reg_con);
5906 %}
5907 
5908 // The instruction usage is guarded by predicate in operand immFPR0().
5909 instruct loadConFPR0(regFPR dst, immFPR0 con) %{
5910   match(Set dst con);
5911   ins_cost(125);
5912   format %{ "FLDZ   ST\n\t"
5913             "FSTP   $dst" %}
5914   ins_encode %{
5915     __ fldz();
5916     __ fstp_d($dst$$reg);
5917   %}
5918   ins_pipe(fpu_reg_con);
5919 %}
5920 
5921 // The instruction usage is guarded by predicate in operand immFPR1().
5922 instruct loadConFPR1(regFPR dst, immFPR1 con) %{
5923   match(Set dst con);
5924   ins_cost(125);
5925   format %{ "FLD1   ST\n\t"
5926             "FSTP   $dst" %}
5927   ins_encode %{
5928     __ fld1();
5929     __ fstp_d($dst$$reg);
5930   %}
5931   ins_pipe(fpu_reg_con);
5932 %}
5933 
5934 // The instruction usage is guarded by predicate in operand immF().
5935 instruct loadConF(regF dst, immF con) %{
5936   match(Set dst con);
5937   ins_cost(125);
5938   format %{ "MOVSS  $dst,[$constantaddress]\t# load from constant table: float=$con" %}
5939   ins_encode %{
5940     __ movflt($dst$$XMMRegister, $constantaddress($con));
5941   %}
5942   ins_pipe(pipe_slow);
5943 %}
5944 
5945 // The instruction usage is guarded by predicate in operand immF0().
5946 instruct loadConF0(regF dst, immF0 src) %{
5947   match(Set dst src);
5948   ins_cost(100);
5949   format %{ "XORPS  $dst,$dst\t# float 0.0" %}
5950   ins_encode %{
5951     __ xorps($dst$$XMMRegister, $dst$$XMMRegister);
5952   %}
5953   ins_pipe(pipe_slow);
5954 %}
5955 
5956 // The instruction usage is guarded by predicate in operand immDPR().
5957 instruct loadConDPR(regDPR dst, immDPR con) %{
5958   match(Set dst con);
5959   ins_cost(125);
5960 
5961   format %{ "FLD_D  ST,[$constantaddress]\t# load from constant table: double=$con\n\t"
5962             "FSTP   $dst" %}
5963   ins_encode %{
5964     __ fld_d($constantaddress($con));
5965     __ fstp_d($dst$$reg);
5966   %}
5967   ins_pipe(fpu_reg_con);
5968 %}
5969 
5970 // The instruction usage is guarded by predicate in operand immDPR0().
5971 instruct loadConDPR0(regDPR dst, immDPR0 con) %{
5972   match(Set dst con);
5973   ins_cost(125);
5974 
5975   format %{ "FLDZ   ST\n\t"
5976             "FSTP   $dst" %}
5977   ins_encode %{
5978     __ fldz();
5979     __ fstp_d($dst$$reg);
5980   %}
5981   ins_pipe(fpu_reg_con);
5982 %}
5983 
5984 // The instruction usage is guarded by predicate in operand immDPR1().
5985 instruct loadConDPR1(regDPR dst, immDPR1 con) %{
5986   match(Set dst con);
5987   ins_cost(125);
5988 
5989   format %{ "FLD1   ST\n\t"
5990             "FSTP   $dst" %}
5991   ins_encode %{
5992     __ fld1();
5993     __ fstp_d($dst$$reg);
5994   %}
5995   ins_pipe(fpu_reg_con);
5996 %}
5997 
5998 // The instruction usage is guarded by predicate in operand immD().
5999 instruct loadConD(regD dst, immD con) %{
6000   match(Set dst con);
6001   ins_cost(125);
6002   format %{ "MOVSD  $dst,[$constantaddress]\t# load from constant table: double=$con" %}
6003   ins_encode %{
6004     __ movdbl($dst$$XMMRegister, $constantaddress($con));
6005   %}
6006   ins_pipe(pipe_slow);
6007 %}
6008 
6009 // The instruction usage is guarded by predicate in operand immD0().
6010 instruct loadConD0(regD dst, immD0 src) %{
6011   match(Set dst src);
6012   ins_cost(100);
6013   format %{ "XORPD  $dst,$dst\t# double 0.0" %}
6014   ins_encode %{
6015     __ xorpd ($dst$$XMMRegister, $dst$$XMMRegister);
6016   %}
6017   ins_pipe( pipe_slow );
6018 %}
6019 
6020 // Load Stack Slot
6021 instruct loadSSI(rRegI dst, stackSlotI src) %{
6022   match(Set dst src);
6023   ins_cost(125);
6024 
6025   format %{ "MOV    $dst,$src" %}
6026   opcode(0x8B);
6027   ins_encode( OpcP, RegMem(dst,src));
6028   ins_pipe( ialu_reg_mem );
6029 %}
6030 
6031 instruct loadSSL(eRegL dst, stackSlotL src) %{
6032   match(Set dst src);
6033 
6034   ins_cost(200);
6035   format %{ "MOV    $dst.lo,$src\n\t"
6036             "MOV    $dst.hi,$src+4" %}
6037   opcode(0x8B, 0x8B);
6038   ins_encode( OpcP, RegMem( dst, src ), OpcS, RegMem_Hi( dst, src ) );
6039   ins_pipe( ialu_mem_long_reg );
6040 %}
6041 
6042 // Load Stack Slot
6043 instruct loadSSP(eRegP dst, stackSlotP src) %{
6044   match(Set dst src);
6045   ins_cost(125);
6046 
6047   format %{ "MOV    $dst,$src" %}
6048   opcode(0x8B);
6049   ins_encode( OpcP, RegMem(dst,src));
6050   ins_pipe( ialu_reg_mem );
6051 %}
6052 
6053 // Load Stack Slot
6054 instruct loadSSF(regFPR dst, stackSlotF src) %{
6055   match(Set dst src);
6056   ins_cost(125);
6057 
6058   format %{ "FLD_S  $src\n\t"
6059             "FSTP   $dst" %}
6060   opcode(0xD9);               /* D9 /0, FLD m32real */
6061   ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
6062               Pop_Reg_FPR(dst) );
6063   ins_pipe( fpu_reg_mem );
6064 %}
6065 
6066 // Load Stack Slot
6067 instruct loadSSD(regDPR dst, stackSlotD src) %{
6068   match(Set dst src);
6069   ins_cost(125);
6070 
6071   format %{ "FLD_D  $src\n\t"
6072             "FSTP   $dst" %}
6073   opcode(0xDD);               /* DD /0, FLD m64real */
6074   ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
6075               Pop_Reg_DPR(dst) );
6076   ins_pipe( fpu_reg_mem );
6077 %}
6078 
6079 // Prefetch instructions.
6080 // Must be safe to execute with invalid address (cannot fault).
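     // The ReadPrefetchInstr and AllocatePrefetchInstr flags select among the
     // NTA/T0/T2/PREFETCHW variants below via their predicates; without SSE or
     // 3DNow! prefetch support the match degenerates to an empty encoding.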
6081 
6082 instruct prefetchr0( memory mem ) %{
6083   predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch());
6084   match(PrefetchRead mem);
6085   ins_cost(0);
6086   size(0);
6087   format %{ "PREFETCHR (non-SSE is empty encoding)" %}
6088   ins_encode();
6089   ins_pipe(empty);
6090 %}
6091 
6092 instruct prefetchr( memory mem ) %{
6093   predicate((UseSSE==0 && VM_Version::supports_3dnow_prefetch()) || ReadPrefetchInstr==3);
6094   match(PrefetchRead mem);
6095   ins_cost(100);
6096 
6097   format %{ "PREFETCHR $mem\t! Prefetch into level 1 cache for read" %}
6098   ins_encode %{
6099     __ prefetchr($mem$$Address);
6100   %}
6101   ins_pipe(ialu_mem);
6102 %}
6103 
6104 instruct prefetchrNTA( memory mem ) %{
6105   predicate(UseSSE>=1 && ReadPrefetchInstr==0);
6106   match(PrefetchRead mem);
6107   ins_cost(100);
6108 
6109   format %{ "PREFETCHNTA $mem\t! Prefetch into non-temporal cache for read" %}
6110   ins_encode %{
6111     __ prefetchnta($mem$$Address);
6112   %}
6113   ins_pipe(ialu_mem);
6114 %}
6115 
6116 instruct prefetchrT0( memory mem ) %{
6117   predicate(UseSSE>=1 && ReadPrefetchInstr==1);
6118   match(PrefetchRead mem);
6119   ins_cost(100);
6120 
6121   format %{ "PREFETCHT0 $mem\t! Prefetch into L1 and L2 caches for read" %}
6122   ins_encode %{
6123     __ prefetcht0($mem$$Address);
6124   %}
6125   ins_pipe(ialu_mem);
6126 %}
6127 
6128 instruct prefetchrT2( memory mem ) %{
6129   predicate(UseSSE>=1 && ReadPrefetchInstr==2);
6130   match(PrefetchRead mem);
6131   ins_cost(100);
6132 
6133   format %{ "PREFETCHT2 $mem\t! Prefetch into L2 cache for read" %}
6134   ins_encode %{
6135     __ prefetcht2($mem$$Address);
6136   %}
6137   ins_pipe(ialu_mem);
6138 %}
6139 
6140 instruct prefetchw0( memory mem ) %{
6141   predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch());
6142   match(PrefetchWrite mem);
6143   ins_cost(0);
6144   size(0);
6145   format %{ "Prefetch (non-SSE is empty encoding)" %}
6146   ins_encode();
6147   ins_pipe(empty);
6148 %}
6149 
6150 instruct prefetchw( memory mem ) %{
6151   predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch());
6152   match( PrefetchWrite mem );
6153   ins_cost(100);
6154 
6155   format %{ "PREFETCHW $mem\t! Prefetch into L1 cache and mark modified" %}
6156   ins_encode %{
6157     __ prefetchw($mem$$Address);
6158   %}
6159   ins_pipe(ialu_mem);
6160 %}
6161 
6162 instruct prefetchwNTA( memory mem ) %{
6163   predicate(UseSSE>=1);
6164   match(PrefetchWrite mem);
6165   ins_cost(100);
6166 
6167   format %{ "PREFETCHNTA $mem\t! Prefetch into non-temporal cache for write" %}
6168   ins_encode %{
6169     __ prefetchnta($mem$$Address);
6170   %}
6171   ins_pipe(ialu_mem);
6172 %}
6173 
6174 // Prefetch instructions for allocation.
6175 
6176 instruct prefetchAlloc0( memory mem ) %{
6177   predicate(UseSSE==0 && AllocatePrefetchInstr!=3);
6178   match(PrefetchAllocation mem);
6179   ins_cost(0);
6180   size(0);
6181   format %{ "Prefetch allocation (non-SSE is empty encoding)" %}
6182   ins_encode();
6183   ins_pipe(empty);
6184 %}
6185 
6186 instruct prefetchAlloc( memory mem ) %{
6187   predicate(AllocatePrefetchInstr==3);
6188   match( PrefetchAllocation mem );
6189   ins_cost(100);
6190 
6191   format %{ "PREFETCHW $mem\t! Prefetch allocation into L1 cache and mark modified" %}
6192   ins_encode %{
6193     __ prefetchw($mem$$Address);
6194   %}
6195   ins_pipe(ialu_mem);
6196 %}
6197 
6198 instruct prefetchAllocNTA( memory mem ) %{
6199   predicate(UseSSE>=1 && AllocatePrefetchInstr==0);
6200   match(PrefetchAllocation mem);
6201   ins_cost(100);
6202 
6203   format %{ "PREFETCHNTA $mem\t! Prefetch allocation into non-temporal cache for write" %}
6204   ins_encode %{
6205     __ prefetchnta($mem$$Address);
6206   %}
6207   ins_pipe(ialu_mem);
6208 %}
6209 
6210 instruct prefetchAllocT0( memory mem ) %{
6211   predicate(UseSSE>=1 && AllocatePrefetchInstr==1);
6212   match(PrefetchAllocation mem);
6213   ins_cost(100);
6214 
6215   format %{ "PREFETCHT0 $mem\t! Prefetch allocation into L1 and L2 caches for write" %}
6216   ins_encode %{
6217     __ prefetcht0($mem$$Address);
6218   %}
6219   ins_pipe(ialu_mem);
6220 %}
6221 
6222 instruct prefetchAllocT2( memory mem ) %{
6223   predicate(UseSSE>=1 && AllocatePrefetchInstr==2);
6224   match(PrefetchAllocation mem);
6225   ins_cost(100);
6226 
6227   format %{ "PREFETCHT2 $mem\t! Prefetch allocation into L2 cache for write" %}
6228   ins_encode %{
6229     __ prefetcht2($mem$$Address);
6230   %}
6231   ins_pipe(ialu_mem);
6232 %}
6233 
6234 //----------Store Instructions-------------------------------------------------
6235 
6236 // Store Byte
6237 instruct storeB(memory mem, xRegI src) %{
6238   match(Set mem (StoreB mem src));
6239 
6240   ins_cost(125);
6241   format %{ "MOV8   $mem,$src" %}
6242   opcode(0x88);
6243   ins_encode( OpcP, RegMem( src, mem ) );
6244   ins_pipe( ialu_mem_reg );
6245 %}
6246 
6247 // Store Char/Short
6248 instruct storeC(memory mem, rRegI src) %{
6249   match(Set mem (StoreC mem src));
6250 
6251   ins_cost(125);
6252   format %{ "MOV16  $mem,$src" %}
6253   opcode(0x89, 0x66);
6254   ins_encode( OpcS, OpcP, RegMem( src, mem ) );
6255   ins_pipe( ialu_mem_reg );
6256 %}
6257 
6258 // Store Integer
6259 instruct storeI(memory mem, rRegI src) %{
6260   match(Set mem (StoreI mem src));
6261 
6262   ins_cost(125);
6263   format %{ "MOV    $mem,$src" %}
6264   opcode(0x89);
6265   ins_encode( OpcP, RegMem( src, mem ) );
6266   ins_pipe( ialu_mem_reg );
6267 %}
6268 
6269 // Store Long
6270 instruct storeL(long_memory mem, eRegL src) %{
6271   predicate(!((StoreLNode*)n)->require_atomic_access());
6272   match(Set mem (StoreL mem src));
6273 
6274   ins_cost(200);
6275   format %{ "MOV    $mem,$src.lo\n\t"
6276             "MOV    $mem+4,$src.hi" %}
6277   opcode(0x89, 0x89);
6278   ins_encode( OpcP, RegMem( src, mem ), OpcS, RegMem_Hi( src, mem ) );
6279   ins_pipe( ialu_mem_long_reg );
6280 %}
6281 
6282 // Store Long to Integer
6283 instruct storeL2I(memory mem, eRegL src) %{
6284   match(Set mem (StoreI mem (ConvL2I src)));
6285 
6286   format %{ "MOV    $mem,$src.lo\t# long -> int" %}
6287   ins_encode %{
6288     __ movl($mem$$Address, $src$$Register);
6289   %}
6290   ins_pipe(ialu_mem_reg);
6291 %}
6292 
6293 // Volatile Store Long.  Must be atomic, so move it into
6294 // the FP TOS and then do a 64-bit FIST.  Has to probe the
6295 // target address before the store (for null-ptr checks)
6296 // so the memory operand is used twice in the encoding.
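     // A single x87 FILD/FISTp pair moves the whole 64-bit value in one
     // memory access, so (assuming the usual natural alignment of long
     // fields) the store cannot be torn into two 32-bit halves the way the
     // two-MOV sequence of the plain storeL above could be.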
6297 instruct storeL_volatile(memory mem, stackSlotL src, eFlagsReg cr ) %{
6298   predicate(UseSSE<=1 && ((StoreLNode*)n)->require_atomic_access());
6299   match(Set mem (StoreL mem src));
6300   effect( KILL cr );
6301   ins_cost(400);
6302   format %{ "CMP    $mem,EAX\t# Probe address for implicit null check\n\t"
6303             "FILD   $src\n\t"
6304             "FISTp  $mem\t # 64-bit atomic volatile long store" %}
6305   opcode(0x3B);
6306   ins_encode( OpcP, RegMem( EAX, mem ), enc_storeL_volatile(mem,src));
6307   ins_pipe( fpu_reg_mem );
6308 %}
6309 
6310 instruct storeLX_volatile(memory mem, stackSlotL src, regD tmp, eFlagsReg cr) %{
6311   predicate(UseSSE>=2 && ((StoreLNode*)n)->require_atomic_access());
6312   match(Set mem (StoreL mem src));
6313   effect( TEMP tmp, KILL cr );
6314   ins_cost(380);
6315   format %{ "CMP    $mem,EAX\t# Probe address for implicit null check\n\t"
6316             "MOVSD  $tmp,$src\n\t"
6317             "MOVSD  $mem,$tmp\t # 64-bit atomic volatile long store" %}
6318   ins_encode %{
6319     __ cmpl(rax, $mem$$Address);
6320     __ movdbl($tmp$$XMMRegister, Address(rsp, $src$$disp));
6321     __ movdbl($mem$$Address, $tmp$$XMMRegister);
6322   %}
6323   ins_pipe( pipe_slow );
6324 %}
6325 
6326 instruct storeLX_reg_volatile(memory mem, eRegL src, regD tmp2, regD tmp, eFlagsReg cr) %{
6327   predicate(UseSSE>=2 && ((StoreLNode*)n)->require_atomic_access());
6328   match(Set mem (StoreL mem src));
6329   effect( TEMP tmp2 , TEMP tmp, KILL cr );
6330   ins_cost(360);
6331   format %{ "CMP    $mem,EAX\t# Probe address for implicit null check\n\t"
6332             "MOVD   $tmp,$src.lo\n\t"
6333             "MOVD   $tmp2,$src.hi\n\t"
6334             "PUNPCKLDQ $tmp,$tmp2\n\t"
6335             "MOVSD  $mem,$tmp\t # 64-bit atomic volatile long store" %}
6336   ins_encode %{
6337     __ cmpl(rax, $mem$$Address);
6338     __ movdl($tmp$$XMMRegister, $src$$Register);
6339     __ movdl($tmp2$$XMMRegister, HIGH_FROM_LOW($src$$Register));
6340     __ punpckldq($tmp$$XMMRegister, $tmp2$$XMMRegister);
6341     __ movdbl($mem$$Address, $tmp$$XMMRegister);
6342   %}
6343   ins_pipe( pipe_slow );
6344 %}
6345 
6346 // Store Pointer; for storing unknown oops and raw pointers
6347 instruct storeP(memory mem, anyRegP src) %{
6348   match(Set mem (StoreP mem src));
6349 
6350   ins_cost(125);
6351   format %{ "MOV    $mem,$src" %}
6352   opcode(0x89);
6353   ins_encode( OpcP, RegMem( src, mem ) );
6354   ins_pipe( ialu_mem_reg );
6355 %}
6356 
6357 // Store Integer Immediate
6358 instruct storeImmI(memory mem, immI src) %{
6359   match(Set mem (StoreI mem src));
6360 
6361   ins_cost(150);
6362   format %{ "MOV    $mem,$src" %}
6363   opcode(0xC7);               /* C7 /0 */
6364   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con32( src ));
6365   ins_pipe( ialu_mem_imm );
6366 %}
6367 
6368 // Store Short/Char Immediate
6369 instruct storeImmI16(memory mem, immI16 src) %{
6370   predicate(UseStoreImmI16);
6371   match(Set mem (StoreC mem src));
6372 
6373   ins_cost(150);
6374   format %{ "MOV16  $mem,$src" %}
6375   opcode(0xC7);     /* C7 /0 Same as 32 store immediate with prefix */
6376   ins_encode( SizePrefix, OpcP, RMopc_Mem(0x00,mem),  Con16( src ));
6377   ins_pipe( ialu_mem_imm );
6378 %}
6379 
6380 // Store Pointer Immediate; null pointers or constant oops that do not
6381 // need card-mark barriers.
6382 instruct storeImmP(memory mem, immP src) %{
6383   match(Set mem (StoreP mem src));
6384 
6385   ins_cost(150);
6386   format %{ "MOV    $mem,$src" %}
6387   opcode(0xC7);               /* C7 /0 */
6388   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con32( src ));
6389   ins_pipe( ialu_mem_imm );
6390 %}
6391 
6392 // Store Byte Immediate
6393 instruct storeImmB(memory mem, immI8 src) %{
6394   match(Set mem (StoreB mem src));
6395 
6396   ins_cost(150);
6397   format %{ "MOV8   $mem,$src" %}
6398   opcode(0xC6);               /* C6 /0 */
6399   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con8or32( src ));
6400   ins_pipe( ialu_mem_imm );
6401 %}
6402 
6403 // Store CMS card-mark Immediate
6404 instruct storeImmCM(memory mem, immI8 src) %{
6405   match(Set mem (StoreCM mem src));
6406 
6407   ins_cost(150);
6408   format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
6409   opcode(0xC6);               /* C6 /0 */
6410   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con8or32( src ));
6411   ins_pipe( ialu_mem_imm );
6412 %}
6413 
6414 // Store Double
6415 instruct storeDPR( memory mem, regDPR1 src) %{
6416   predicate(UseSSE<=1);
6417   match(Set mem (StoreD mem src));
6418 
6419   ins_cost(100);
6420   format %{ "FST_D  $mem,$src" %}
6421   opcode(0xDD);       /* DD /2 */
6422   ins_encode( enc_FPR_store(mem,src) );
6423   ins_pipe( fpu_mem_reg );
6424 %}
6425 
6426 // Store double does rounding on x86
6427 instruct storeDPR_rounded( memory mem, regDPR1 src) %{
6428   predicate(UseSSE<=1);
6429   match(Set mem (StoreD mem (RoundDouble src)));
6430 
6431   ins_cost(100);
6432   format %{ "FST_D  $mem,$src\t# round" %}
6433   opcode(0xDD);       /* DD /2 */
6434   ins_encode( enc_FPR_store(mem,src) );
6435   ins_pipe( fpu_mem_reg );
6436 %}
6437 
6438 // Store XMM register to memory (double-precision floating point)
6439 // MOVSD instruction
6440 instruct storeD(memory mem, regD src) %{
6441   predicate(UseSSE>=2);
6442   match(Set mem (StoreD mem src));
6443   ins_cost(95);
6444   format %{ "MOVSD  $mem,$src" %}
6445   ins_encode %{
6446     __ movdbl($mem$$Address, $src$$XMMRegister);
6447   %}
6448   ins_pipe( pipe_slow );
6449 %}
6450 
6451 // Store XMM register to memory (single-precision floating point)
6452 // MOVSS instruction
6453 instruct storeF(memory mem, regF src) %{
6454   predicate(UseSSE>=1);
6455   match(Set mem (StoreF mem src));
6456   ins_cost(95);
6457   format %{ "MOVSS  $mem,$src" %}
6458   ins_encode %{
6459     __ movflt($mem$$Address, $src$$XMMRegister);
6460   %}
6461   ins_pipe( pipe_slow );
6462 %}
6463 
6464 // Store Float
6465 instruct storeFPR( memory mem, regFPR1 src) %{
6466   predicate(UseSSE==0);
6467   match(Set mem (StoreF mem src));
6468 
6469   ins_cost(100);
6470   format %{ "FST_S  $mem,$src" %}
6471   opcode(0xD9);       /* D9 /2 */
6472   ins_encode( enc_FPR_store(mem,src) );
6473   ins_pipe( fpu_mem_reg );
6474 %}
6475 
6476 // Store Float does rounding on x86
6477 instruct storeFPR_rounded( memory mem, regFPR1 src) %{
6478   predicate(UseSSE==0);
6479   match(Set mem (StoreF mem (RoundFloat src)));
6480 
6481   ins_cost(100);
6482   format %{ "FST_S  $mem,$src\t# round" %}
6483   opcode(0xD9);       /* D9 /2 */
6484   ins_encode( enc_FPR_store(mem,src) );
6485   ins_pipe( fpu_mem_reg );
6486 %}
6487 
6488 // Store Float after double-to-float conversion; does rounding on x86
6489 instruct storeFPR_Drounded( memory mem, regDPR1 src) %{
6490   predicate(UseSSE<=1);
6491   match(Set mem (StoreF mem (ConvD2F src)));
6492 
6493   ins_cost(100);
6494   format %{ "FST_S  $mem,$src\t# D-round" %}
6495   opcode(0xD9);       /* D9 /2 */
6496   ins_encode( enc_FPR_store(mem,src) );
6497   ins_pipe( fpu_mem_reg );
6498 %}
6499 
6500 // Store immediate Float value (it is faster than store from FPU register)
6501 // The instruction usage is guarded by predicate in operand immFPR().
6502 instruct storeFPR_imm( memory mem, immFPR src) %{
6503   match(Set mem (StoreF mem src));
6504 
6505   ins_cost(50);
6506   format %{ "MOV    $mem,$src\t# store float" %}
6507   opcode(0xC7);               /* C7 /0 */
6508   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con32FPR_as_bits( src ));
6509   ins_pipe( ialu_mem_imm );
6510 %}
6511 
6512 // Store immediate Float value (it is faster than store from XMM register)
6513 // The instruction usage is guarded by predicate in operand immF().
6514 instruct storeF_imm( memory mem, immF src) %{
6515   match(Set mem (StoreF mem src));
6516 
6517   ins_cost(50);
6518   format %{ "MOV    $mem,$src\t# store float" %}
6519   opcode(0xC7);               /* C7 /0 */
6520   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con32F_as_bits( src ));
6521   ins_pipe( ialu_mem_imm );
6522 %}
6523 
6524 // Store Integer to stack slot
6525 instruct storeSSI(stackSlotI dst, rRegI src) %{
6526   match(Set dst src);
6527 
6528   ins_cost(100);
6529   format %{ "MOV    $dst,$src" %}
6530   opcode(0x89);
6531   ins_encode( OpcPRegSS( dst, src ) );
6532   ins_pipe( ialu_mem_reg );
6533 %}
6534 
6535 // Store Pointer to stack slot
6536 instruct storeSSP(stackSlotP dst, eRegP src) %{
6537   match(Set dst src);
6538 
6539   ins_cost(100);
6540   format %{ "MOV    $dst,$src" %}
6541   opcode(0x89);
6542   ins_encode( OpcPRegSS( dst, src ) );
6543   ins_pipe( ialu_mem_reg );
6544 %}
6545 
6546 // Store Long to stack slot
6547 instruct storeSSL(stackSlotL dst, eRegL src) %{
6548   match(Set dst src);
6549 
6550   ins_cost(200);
6551   format %{ "MOV    $dst,$src.lo\n\t"
6552             "MOV    $dst+4,$src.hi" %}
6553   opcode(0x89, 0x89);
6554   ins_encode( OpcP, RegMem( src, dst ), OpcS, RegMem_Hi( src, dst ) );
6555   ins_pipe( ialu_mem_long_reg );
6556 %}
6557 
6558 //----------MemBar Instructions-----------------------------------------------
6559 // Memory barrier flavors
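     // For ordinary cacheable accesses x86 only reorders a store with a
     // later load; loads are not reordered with other loads and stores are
     // not reordered with other stores.  Acquire and release barriers
     // therefore emit no code below, and only the StoreLoad case
     // (membar_volatile) costs an instruction.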
6560 
6561 instruct membar_acquire() %{
6562   match(MemBarAcquire);
6563   match(LoadFence);
6564   ins_cost(400);
6565 
6566   size(0);
6567   format %{ "MEMBAR-acquire ! (empty encoding)" %}
6568   ins_encode();
6569   ins_pipe(empty);
6570 %}
6571 
6572 instruct membar_acquire_lock() %{
6573   match(MemBarAcquireLock);
6574   ins_cost(0);
6575 
6576   size(0);
6577   format %{ "MEMBAR-acquire (prior CMPXCHG in FastLock so empty encoding)" %}
6578   ins_encode( );
6579   ins_pipe(empty);
6580 %}
6581 
6582 instruct membar_release() %{
6583   match(MemBarRelease);
6584   match(StoreFence);
6585   ins_cost(400);
6586 
6587   size(0);
6588   format %{ "MEMBAR-release ! (empty encoding)" %}
6589   ins_encode( );
6590   ins_pipe(empty);
6591 %}
6592 
6593 instruct membar_release_lock() %{
6594   match(MemBarReleaseLock);
6595   ins_cost(0);
6596 
6597   size(0);
6598   format %{ "MEMBAR-release (a FastUnlock follows so empty encoding)" %}
6599   ins_encode( );
6600   ins_pipe(empty);
6601 %}
6602 
6603 instruct membar_volatile(eFlagsReg cr) %{
6604   match(MemBarVolatile);
6605   effect(KILL cr);
6606   ins_cost(400);
6607 
6608   format %{ 
6609     $$template
6610     if (os::is_MP()) {
6611       $$emit$$"LOCK ADDL [ESP + #0], 0\t! membar_volatile"
6612     } else {
6613       $$emit$$"MEMBAR-volatile ! (empty encoding)"
6614     }
6615   %}
6616   ins_encode %{
6617     __ membar(Assembler::StoreLoad);
6618   %}
6619   ins_pipe(pipe_slow);
6620 %}
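     // On MP systems the StoreLoad fence above is a LOCK'ed ADD of zero to
     // the top-of-stack word (see the format string): the ADD changes
     // nothing, but the LOCK prefix acts as a full fence and has
     // historically been cheaper than MFENCE on many x86 implementations.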
6621 
6622 instruct unnecessary_membar_volatile() %{
6623   match(MemBarVolatile);
6624   predicate(Matcher::post_store_load_barrier(n));
6625   ins_cost(0);
6626 
6627   size(0);
6628   format %{ "MEMBAR-volatile (unnecessary so empty encoding)" %}
6629   ins_encode( );
6630   ins_pipe(empty);
6631 %}
6632 
6633 instruct membar_storestore() %{
6634   match(MemBarStoreStore);
6635   ins_cost(0);
6636 
6637   size(0);
6638   format %{ "MEMBAR-storestore (empty encoding)" %}
6639   ins_encode( );
6640   ins_pipe(empty);
6641 %}
6642 
6643 //----------Move Instructions--------------------------------------------------
6644 instruct castX2P(eAXRegP dst, eAXRegI src) %{
6645   match(Set dst (CastX2P src));
6646   format %{ "# X2P  $dst, $src" %}
6647   ins_encode( /*empty encoding*/ );
6648   ins_cost(0);
6649   ins_pipe(empty);
6650 %}
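     // castX2P needs no code because both operands are pinned to EAX: the
     // raw integer bits already sit in the register the pointer result is
     // required to occupy.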
6651 
6652 instruct castP2X(rRegI dst, eRegP src ) %{
6653   match(Set dst (CastP2X src));
6654   ins_cost(50);
6655   format %{ "MOV    $dst, $src\t# CastP2X" %}
6656   ins_encode( enc_Copy( dst, src) );
6657   ins_pipe( ialu_reg_reg );
6658 %}
6659 
6660 //----------Conditional Move---------------------------------------------------
6661 // Conditional move
6662 instruct jmovI_reg(cmpOp cop, eFlagsReg cr, rRegI dst, rRegI src) %{
6663   predicate(!VM_Version::supports_cmov() );
6664   match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
6665   ins_cost(200);
6666   format %{ "J$cop,us skip\t# signed cmove\n\t"
6667             "MOV    $dst,$src\n"
6668       "skip:" %}
6669   ins_encode %{
6670     Label Lskip;
6671     // Invert sense of branch from sense of CMOV
6672     __ jccb((Assembler::Condition)($cop$$cmpcode^1), Lskip);
6673     __ movl($dst$$Register, $src$$Register);
6674     __ bind(Lskip);
6675   %}
6676   ins_pipe( pipe_cmov_reg );
6677 %}
6678 
6679 instruct jmovI_regU(cmpOpU cop, eFlagsRegU cr, rRegI dst, rRegI src) %{
6680   predicate(!VM_Version::supports_cmov() );
6681   match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
6682   ins_cost(200);
6683   format %{ "J$cop,us skip\t# unsigned cmove\n\t"
6684             "MOV    $dst,$src\n"
6685       "skip:" %}
6686   ins_encode %{
6687     Label Lskip;
6688     // Invert sense of branch from sense of CMOV
6689     __ jccb((Assembler::Condition)($cop$$cmpcode^1), Lskip);
6690     __ movl($dst$$Register, $src$$Register);
6691     __ bind(Lskip);
6692   %}
6693   ins_pipe( pipe_cmov_reg );
6694 %}
6695 
6696 instruct cmovI_reg(rRegI dst, rRegI src, eFlagsReg cr, cmpOp cop ) %{
6697   predicate(VM_Version::supports_cmov() );
6698   match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
6699   ins_cost(200);
6700   format %{ "CMOV$cop $dst,$src" %}
6701   opcode(0x0F,0x40);
6702   ins_encode( enc_cmov(cop), RegReg( dst, src ) );
6703   ins_pipe( pipe_cmov_reg );
6704 %}
6705 
6706 instruct cmovI_regU( cmpOpU cop, eFlagsRegU cr, rRegI dst, rRegI src ) %{
6707   predicate(VM_Version::supports_cmov() );
6708   match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
6709   ins_cost(200);
6710   format %{ "CMOV$cop $dst,$src" %}
6711   opcode(0x0F,0x40);
6712   ins_encode( enc_cmov(cop), RegReg( dst, src ) );
6713   ins_pipe( pipe_cmov_reg );
6714 %}
6715 
6716 instruct cmovI_regUCF( cmpOpUCF cop, eFlagsRegUCF cr, rRegI dst, rRegI src ) %{
6717   predicate(VM_Version::supports_cmov() );
6718   match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
6719   ins_cost(200);
6720   expand %{
6721     cmovI_regU(cop, cr, dst, src);
6722   %}
6723 %}
6724 
6725 // Conditional move
6726 instruct cmovI_mem(cmpOp cop, eFlagsReg cr, rRegI dst, memory src) %{
6727   predicate(VM_Version::supports_cmov() );
6728   match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
6729   ins_cost(250);
6730   format %{ "CMOV$cop $dst,$src" %}
6731   opcode(0x0F,0x40);
6732   ins_encode( enc_cmov(cop), RegMem( dst, src ) );
6733   ins_pipe( pipe_cmov_mem );
6734 %}
6735 
6736 // Conditional move
6737 instruct cmovI_memU(cmpOpU cop, eFlagsRegU cr, rRegI dst, memory src) %{
6738   predicate(VM_Version::supports_cmov() );
6739   match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
6740   ins_cost(250);
6741   format %{ "CMOV$cop $dst,$src" %}
6742   opcode(0x0F,0x40);
6743   ins_encode( enc_cmov(cop), RegMem( dst, src ) );
6744   ins_pipe( pipe_cmov_mem );
6745 %}
6746 
6747 instruct cmovI_memUCF(cmpOpUCF cop, eFlagsRegUCF cr, rRegI dst, memory src) %{
6748   predicate(VM_Version::supports_cmov() );
6749   match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
6750   ins_cost(250);
6751   expand %{
6752     cmovI_memU(cop, cr, dst, src);
6753   %}
6754 %}
6755 
6756 // Conditional move
6757 instruct cmovP_reg(eRegP dst, eRegP src, eFlagsReg cr, cmpOp cop ) %{
6758   predicate(VM_Version::supports_cmov() );
6759   match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
6760   ins_cost(200);
6761   format %{ "CMOV$cop $dst,$src\t# ptr" %}
6762   opcode(0x0F,0x40);
6763   ins_encode( enc_cmov(cop), RegReg( dst, src ) );
6764   ins_pipe( pipe_cmov_reg );
6765 %}
6766 
6767 // Conditional move (non-P6 version)
6768 // Note:  a CMoveP is generated for stubs and native wrappers
6769 //        regardless of whether we are on a P6, so we
6770 //        emulate a cmov here
6771 instruct cmovP_reg_nonP6(eRegP dst, eRegP src, eFlagsReg cr, cmpOp cop ) %{
6772   match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
6773   ins_cost(300);
6774   format %{ "Jn$cop   skip\n\t"
6775           "MOV    $dst,$src\t# pointer\n"
6776       "skip:" %}
6777   opcode(0x8b);
6778   ins_encode( enc_cmov_branch(cop, 0x2), OpcP, RegReg(dst, src));
6779   ins_pipe( pipe_cmov_reg );
6780 %}
6781 
6782 // Conditional move
6783 instruct cmovP_regU(cmpOpU cop, eFlagsRegU cr, eRegP dst, eRegP src ) %{
6784   predicate(VM_Version::supports_cmov() );
6785   match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
6786   ins_cost(200);
6787   format %{ "CMOV$cop $dst,$src\t# ptr" %}
6788   opcode(0x0F,0x40);
6789   ins_encode( enc_cmov(cop), RegReg( dst, src ) );
6790   ins_pipe( pipe_cmov_reg );
6791 %}
6792 
6793 instruct cmovP_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegP dst, eRegP src ) %{
6794   predicate(VM_Version::supports_cmov() );
6795   match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
6796   ins_cost(200);
6797   expand %{
6798     cmovP_regU(cop, cr, dst, src);
6799   %}
6800 %}
6801 
6802 // DISABLED: Requires the ADLC to emit a bottom_type call that
6803 // correctly meets the two pointer arguments; one is an incoming
6804 // register but the other is a memory operand.  ALSO appears to
6805 // be buggy with implicit null checks.
6806 //
6807 //// Conditional move
6808 //instruct cmovP_mem(cmpOp cop, eFlagsReg cr, eRegP dst, memory src) %{
6809 //  predicate(VM_Version::supports_cmov() );
6810 //  match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
6811 //  ins_cost(250);
6812 //  format %{ "CMOV$cop $dst,$src\t# ptr" %}
6813 //  opcode(0x0F,0x40);
6814 //  ins_encode( enc_cmov(cop), RegMem( dst, src ) );
6815 //  ins_pipe( pipe_cmov_mem );
6816 //%}
6817 //
6818 //// Conditional move
6819 //instruct cmovP_memU(cmpOpU cop, eFlagsRegU cr, eRegP dst, memory src) %{
6820 //  predicate(VM_Version::supports_cmov() );
6821 //  match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
6822 //  ins_cost(250);
6823 //  format %{ "CMOV$cop $dst,$src\t# ptr" %}
6824 //  opcode(0x0F,0x40);
6825 //  ins_encode( enc_cmov(cop), RegMem( dst, src ) );
6826 //  ins_pipe( pipe_cmov_mem );
6827 //%}
6828 
6829 // Conditional move
6830 instruct fcmovDPR_regU(cmpOp_fcmov cop, eFlagsRegU cr, regDPR1 dst, regDPR src) %{
6831   predicate(UseSSE<=1);
6832   match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
6833   ins_cost(200);
6834   format %{ "FCMOV$cop $dst,$src\t# double" %}
6835   opcode(0xDA);
6836   ins_encode( enc_cmov_dpr(cop,src) );
6837   ins_pipe( pipe_cmovDPR_reg );
6838 %}
6839 
6840 // Conditional move
6841 instruct fcmovFPR_regU(cmpOp_fcmov cop, eFlagsRegU cr, regFPR1 dst, regFPR src) %{
6842   predicate(UseSSE==0);
6843   match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
6844   ins_cost(200);
6845   format %{ "FCMOV$cop $dst,$src\t# float" %}
6846   opcode(0xDA);
6847   ins_encode( enc_cmov_dpr(cop,src) );
6848   ins_pipe( pipe_cmovDPR_reg );
6849 %}
6850 
6851 // Float CMOV on Intel doesn't handle *signed* compares, only unsigned.
6852 instruct fcmovDPR_regS(cmpOp cop, eFlagsReg cr, regDPR dst, regDPR src) %{
6853   predicate(UseSSE<=1);
6854   match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
6855   ins_cost(200);
6856   format %{ "Jn$cop   skip\n\t"
6857             "MOV    $dst,$src\t# double\n"
6858       "skip:" %}
6859   opcode (0xdd, 0x3);     /* DD D8+i or DD /3 */
6860   ins_encode( enc_cmov_branch( cop, 0x4 ), Push_Reg_DPR(src), OpcP, RegOpc(dst) );
6861   ins_pipe( pipe_cmovDPR_reg );
6862 %}
6863 
6864 // Float CMOV on Intel doesn't handle *signed* compares, only unsigned.
6865 instruct fcmovFPR_regS(cmpOp cop, eFlagsReg cr, regFPR dst, regFPR src) %{
6866   predicate(UseSSE==0);
6867   match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
6868   ins_cost(200);
6869   format %{ "Jn$cop    skip\n\t"
6870             "MOV    $dst,$src\t# float\n"
6871       "skip:" %}
6872   opcode (0xdd, 0x3);     /* DD D8+i or DD /3 */
6873   ins_encode( enc_cmov_branch( cop, 0x4 ), Push_Reg_FPR(src), OpcP, RegOpc(dst) );
6874   ins_pipe( pipe_cmovDPR_reg );
6875 %}
6876 
6877 // No CMOVE with SSE/SSE2
6878 instruct fcmovF_regS(cmpOp cop, eFlagsReg cr, regF dst, regF src) %{
6879   predicate (UseSSE>=1);
6880   match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
6881   ins_cost(200);
6882   format %{ "Jn$cop   skip\n\t"
6883             "MOVSS  $dst,$src\t# float\n"
6884       "skip:" %}
6885   ins_encode %{
6886     Label skip;
6887     // Invert sense of branch from sense of CMOV
6888     __ jccb((Assembler::Condition)($cop$$cmpcode^1), skip);
6889     __ movflt($dst$$XMMRegister, $src$$XMMRegister);
6890     __ bind(skip);
6891   %}
6892   ins_pipe( pipe_slow );
6893 %}
6894 
6895 // No CMOVE with SSE/SSE2
6896 instruct fcmovD_regS(cmpOp cop, eFlagsReg cr, regD dst, regD src) %{
6897   predicate (UseSSE>=2);
6898   match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
6899   ins_cost(200);
6900   format %{ "Jn$cop   skip\n\t"
6901             "MOVSD  $dst,$src\t# double\n"
6902       "skip:" %}
6903   ins_encode %{
6904     Label skip;
6905     // Invert sense of branch from sense of CMOV
6906     __ jccb((Assembler::Condition)($cop$$cmpcode^1), skip);
6907     __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
6908     __ bind(skip);
6909   %}
6910   ins_pipe( pipe_slow );
6911 %}
6912 
6913 // unsigned version
6914 instruct fcmovF_regU(cmpOpU cop, eFlagsRegU cr, regF dst, regF src) %{
6915   predicate (UseSSE>=1);
6916   match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
6917   ins_cost(200);
6918   format %{ "Jn$cop   skip\n\t"
6919             "MOVSS  $dst,$src\t# float\n"
6920       "skip:" %}
6921   ins_encode %{
6922     Label skip;
6923     // Invert sense of branch from sense of CMOV
6924     __ jccb((Assembler::Condition)($cop$$cmpcode^1), skip);
6925     __ movflt($dst$$XMMRegister, $src$$XMMRegister);
6926     __ bind(skip);
6927   %}
6928   ins_pipe( pipe_slow );
6929 %}
6930 
6931 instruct fcmovF_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, regF dst, regF src) %{
6932   predicate (UseSSE>=1);
6933   match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
6934   ins_cost(200);
6935   expand %{
6936     fcmovF_regU(cop, cr, dst, src);
6937   %}
6938 %}
6939 
6940 // unsigned version
6941 instruct fcmovD_regU(cmpOpU cop, eFlagsRegU cr, regD dst, regD src) %{
6942   predicate (UseSSE>=2);
6943   match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
6944   ins_cost(200);
6945   format %{ "Jn$cop   skip\n\t"
6946             "MOVSD  $dst,$src\t# double\n"
6947       "skip:" %}
6948   ins_encode %{
6949     Label skip;
6950     // Invert sense of branch from sense of CMOV
6951     __ jccb((Assembler::Condition)($cop$$cmpcode^1), skip);
6952     __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
6953     __ bind(skip);
6954   %}
6955   ins_pipe( pipe_slow );
6956 %}
6957 
6958 instruct fcmovD_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, regD dst, regD src) %{
6959   predicate (UseSSE>=2);
6960   match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
6961   ins_cost(200);
6962   expand %{
6963     fcmovD_regU(cop, cr, dst, src);
6964   %}
6965 %}
6966 
6967 instruct cmovL_reg(cmpOp cop, eFlagsReg cr, eRegL dst, eRegL src) %{
6968   predicate(VM_Version::supports_cmov() );
6969   match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
6970   ins_cost(200);
6971   format %{ "CMOV$cop $dst.lo,$src.lo\n\t"
6972             "CMOV$cop $dst.hi,$src.hi" %}
6973   opcode(0x0F,0x40);
6974   ins_encode( enc_cmov(cop), RegReg_Lo2( dst, src ), enc_cmov(cop), RegReg_Hi2( dst, src ) );
6975   ins_pipe( pipe_cmov_reg_long );
6976 %}
6977 
6978 instruct cmovL_regU(cmpOpU cop, eFlagsRegU cr, eRegL dst, eRegL src) %{
6979   predicate(VM_Version::supports_cmov() );
6980   match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
6981   ins_cost(200);
6982   format %{ "CMOV$cop $dst.lo,$src.lo\n\t"
6983             "CMOV$cop $dst.hi,$src.hi" %}
6984   opcode(0x0F,0x40);
6985   ins_encode( enc_cmov(cop), RegReg_Lo2( dst, src ), enc_cmov(cop), RegReg_Hi2( dst, src ) );
6986   ins_pipe( pipe_cmov_reg_long );
6987 %}
6988 
6989 instruct cmovL_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegL dst, eRegL src) %{
6990   predicate(VM_Version::supports_cmov() );
6991   match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
6992   ins_cost(200);
6993   expand %{
6994     cmovL_regU(cop, cr, dst, src);
6995   %}
6996 %}
6997 
6998 //----------Arithmetic Instructions--------------------------------------------
6999 //----------Addition Instructions----------------------------------------------
7000 
7001 // Integer Addition Instructions
7002 instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
7003   match(Set dst (AddI dst src));
7004   effect(KILL cr);
7005 
7006   size(2);
7007   format %{ "ADD    $dst,$src" %}
7008   opcode(0x03);
7009   ins_encode( OpcP, RegReg( dst, src) );
7010   ins_pipe( ialu_reg_reg );
7011 %}
7012 
7013 instruct addI_eReg_imm(rRegI dst, immI src, eFlagsReg cr) %{
7014   match(Set dst (AddI dst src));
7015   effect(KILL cr);
7016 
7017   format %{ "ADD    $dst,$src" %}
7018   opcode(0x81, 0x00); /* /0 id */
7019   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
7020   ins_pipe( ialu_reg );
7021 %}
7022 
7023 instruct incI_eReg(rRegI dst, immI1 src, eFlagsReg cr) %{
7024   predicate(UseIncDec);
7025   match(Set dst (AddI dst src));
7026   effect(KILL cr);
7027 
7028   size(1);
7029   format %{ "INC    $dst" %}
7030   opcode(0x40); /*  */
7031   ins_encode( Opc_plus( primary, dst ) );
7032   ins_pipe( ialu_reg );
7033 %}
7034 
7035 instruct leaI_eReg_immI(rRegI dst, rRegI src0, immI src1) %{
7036   match(Set dst (AddI src0 src1));
7037   ins_cost(110);
7038 
7039   format %{ "LEA    $dst,[$src0 + $src1]" %}
7040   opcode(0x8D); /* 0x8D /r */
7041   ins_encode( OpcP, RegLea( dst, src0, src1 ) );
7042   ins_pipe( ialu_reg_reg );
7043 %}
7044 
7045 instruct leaP_eReg_immI(eRegP dst, eRegP src0, immI src1) %{
7046   match(Set dst (AddP src0 src1));
7047   ins_cost(110);
7048 
7049   format %{ "LEA    $dst,[$src0 + $src1]\t# ptr" %}
7050   opcode(0x8D); /* 0x8D /r */
7051   ins_encode( OpcP, RegLea( dst, src0, src1 ) );
7052   ins_pipe( ialu_reg_reg );
7053 %}
7054 
7055 instruct decI_eReg(rRegI dst, immI_M1 src, eFlagsReg cr) %{
7056   predicate(UseIncDec);
7057   match(Set dst (AddI dst src));
7058   effect(KILL cr);
7059 
7060   size(1);
7061   format %{ "DEC    $dst" %}
7062   opcode(0x48); /*  */
7063   ins_encode( Opc_plus( primary, dst ) );
7064   ins_pipe( ialu_reg );
7065 %}
7066 
7067 instruct addP_eReg(eRegP dst, rRegI src, eFlagsReg cr) %{
7068   match(Set dst (AddP dst src));
7069   effect(KILL cr);
7070 
7071   size(2);
7072   format %{ "ADD    $dst,$src" %}
7073   opcode(0x03);
7074   ins_encode( OpcP, RegReg( dst, src) );
7075   ins_pipe( ialu_reg_reg );
7076 %}
7077 
7078 instruct addP_eReg_imm(eRegP dst, immI src, eFlagsReg cr) %{
7079   match(Set dst (AddP dst src));
7080   effect(KILL cr);
7081 
7082   format %{ "ADD    $dst,$src" %}
7083   opcode(0x81,0x00); /* Opcode 81 /0 id */
7084   // ins_encode( RegImm( dst, src) );
7085   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
7086   ins_pipe( ialu_reg );
7087 %}
7088 
7089 instruct addI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
7090   match(Set dst (AddI dst (LoadI src)));
7091   effect(KILL cr);
7092 
7093   ins_cost(125);
7094   format %{ "ADD    $dst,$src" %}
7095   opcode(0x03);
7096   ins_encode( OpcP, RegMem( dst, src) );
7097   ins_pipe( ialu_reg_mem );
7098 %}
7099 
7100 instruct addI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
7101   match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7102   effect(KILL cr);
7103 
7104   ins_cost(150);
7105   format %{ "ADD    $dst,$src" %}
7106   opcode(0x01);  /* Opcode 01 /r */
7107   ins_encode( OpcP, RegMem( src, dst ) );
7108   ins_pipe( ialu_mem_reg );
7109 %}
7110 
7111 // Add Memory with Immediate
7112 instruct addI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
7113   match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7114   effect(KILL cr);
7115 
7116   ins_cost(125);
7117   format %{ "ADD    $dst,$src" %}
7118   opcode(0x81);               /* Opcode 81 /0 id */
7119   ins_encode( OpcSE( src ), RMopc_Mem(0x00,dst), Con8or32( src ) );
7120   ins_pipe( ialu_mem_imm );
7121 %}
7122 
7123 instruct incI_mem(memory dst, immI1 src, eFlagsReg cr) %{
7124   match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7125   effect(KILL cr);
7126 
7127   ins_cost(125);
7128   format %{ "INC    $dst" %}
7129   opcode(0xFF);               /* Opcode FF /0 */
7130   ins_encode( OpcP, RMopc_Mem(0x00,dst));
7131   ins_pipe( ialu_mem_imm );
7132 %}
7133 
7134 instruct decI_mem(memory dst, immI_M1 src, eFlagsReg cr) %{
7135   match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7136   effect(KILL cr);
7137 
7138   ins_cost(125);
7139   format %{ "DEC    $dst" %}
7140   opcode(0xFF);               /* Opcode FF /1 */
7141   ins_encode( OpcP, RMopc_Mem(0x01,dst));
7142   ins_pipe( ialu_mem_imm );
7143 %}
7144 
7145 
7146 instruct checkCastPP( eRegP dst ) %{
7147   match(Set dst (CheckCastPP dst));
7148 
7149   size(0);
7150   format %{ "#checkcastPP of $dst" %}
7151   ins_encode( /*empty encoding*/ );
7152   ins_pipe( empty );
7153 %}
7154 
7155 instruct castPP( eRegP dst ) %{
7156   match(Set dst (CastPP dst));
7157   format %{ "#castPP of $dst" %}
7158   ins_encode( /*empty encoding*/ );
7159   ins_pipe( empty );
7160 %}
7161 
7162 instruct castII( rRegI dst ) %{
7163   match(Set dst (CastII dst));
7164   format %{ "#castII of $dst" %}
7165   ins_encode( /*empty encoding*/ );
7166   ins_cost(0);
7167   ins_pipe( empty );
7168 %}
7169 
7170 
7171 // Load-locked - same as a regular pointer load when used with compare-swap
7172 instruct loadPLocked(eRegP dst, memory mem) %{
7173   match(Set dst (LoadPLocked mem));
7174 
7175   ins_cost(125);
7176   format %{ "MOV    $dst,$mem\t# Load ptr. locked" %}
7177   opcode(0x8B);
7178   ins_encode( OpcP, RegMem(dst,mem));
7179   ins_pipe( ialu_reg_mem );
7180 %}
7181 
7182 // Conditional-store of the updated heap-top.
7183 // Used during allocation of the shared heap.
7184 // Sets flags (EQ) on success.  Implemented with a CMPXCHG on Intel.
7185 instruct storePConditional( memory heap_top_ptr, eAXRegP oldval, eRegP newval, eFlagsReg cr ) %{
7186   match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
7187   // EAX is killed if there is contention, but then it's also unused.
7188   // In the common case of no contention, EAX holds the new oop address.
7189   format %{ "CMPXCHG $heap_top_ptr,$newval\t# If EAX==$heap_top_ptr Then store $newval into $heap_top_ptr" %}
7190   ins_encode( lock_prefix, Opcode(0x0F), Opcode(0xB1), RegMem(newval,heap_top_ptr) );
7191   ins_pipe( pipe_cmpxchg );
7192 %}
7193 
7194 // Conditional-store of an int value.
7195 // ZF flag is set on success, reset otherwise.  Implemented with a CMPXCHG on Intel.
7196 instruct storeIConditional( memory mem, eAXRegI oldval, rRegI newval, eFlagsReg cr ) %{
7197   match(Set cr (StoreIConditional mem (Binary oldval newval)));
7198   effect(KILL oldval);
7199   format %{ "CMPXCHG $mem,$newval\t# If EAX==$mem Then store $newval into $mem" %}
7200   ins_encode( lock_prefix, Opcode(0x0F), Opcode(0xB1), RegMem(newval, mem) );
7201   ins_pipe( pipe_cmpxchg );
7202 %}
7203 
7204 // Conditional-store of a long value.
7205 // ZF flag is set on success, reset otherwise.  Implemented with a CMPXCHG8 on Intel.
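     // CMPXCHG8B compares EDX:EAX with the 8-byte memory operand; on a match
     // it writes ECX:EBX to memory and sets ZF, otherwise it loads the
     // current memory value into EDX:EAX and clears ZF.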
7206 instruct storeLConditional( memory mem, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
7207   match(Set cr (StoreLConditional mem (Binary oldval newval)));
7208   effect(KILL oldval);
7209   format %{ "XCHG   EBX,ECX\t# correct order for CMPXCHG8 instruction\n\t"
7210             "CMPXCHG8 $mem,ECX:EBX\t# If EDX:EAX==$mem Then store ECX:EBX into $mem\n\t"
7211             "XCHG   EBX,ECX"
7212   %}
7213   ins_encode %{
7214     // Note: we need to swap rbx and rcx before and after the
7215     //       cmpxchg8 instruction because the instruction uses
7216     //       rcx as the high order word of the new value to store but
7217     //       our register encoding uses rbx.
7218     __ xchgl(as_Register(EBX_enc), as_Register(ECX_enc));
7219     if( os::is_MP() )
7220       __ lock();
7221     __ cmpxchg8($mem$$Address);
7222     __ xchgl(as_Register(EBX_enc), as_Register(ECX_enc));
7223   %}
7224   ins_pipe( pipe_cmpxchg );
7225 %}
7226 
7227 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
7228 
7229 instruct compareAndSwapL( rRegI res, eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
7230   predicate(VM_Version::supports_cx8());
7231   match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
7232   effect(KILL cr, KILL oldval);
7233   format %{ "CMPXCHG8 [$mem_ptr],$newval\t# If EDX:EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
7234             "MOV    $res,0\n\t"
7235             "JNE,s  fail\n\t"
7236             "MOV    $res,1\n"
7237           "fail:" %}
7238   ins_encode( enc_cmpxchg8(mem_ptr),
7239               enc_flags_ne_to_boolean(res) );
7240   ins_pipe( pipe_cmpxchg );
7241 %}
7242 
7243 instruct compareAndSwapP( rRegI res,  pRegP mem_ptr, eAXRegP oldval, eCXRegP newval, eFlagsReg cr) %{
7244   match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
7245   effect(KILL cr, KILL oldval);
7246   format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
7247             "MOV    $res,0\n\t"
7248             "JNE,s  fail\n\t"
7249             "MOV    $res,1\n"
7250           "fail:" %}
7251   ins_encode( enc_cmpxchg(mem_ptr), enc_flags_ne_to_boolean(res) );
7252   ins_pipe( pipe_cmpxchg );
7253 %}
7254 
7255 instruct compareAndSwapI( rRegI res, pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr) %{
7256   match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
7257   effect(KILL cr, KILL oldval);
7258   format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
7259             "MOV    $res,0\n\t"
7260             "JNE,s  fail\n\t"
7261             "MOV    $res,1\n"
7262           "fail:" %}
7263   ins_encode( enc_cmpxchg(mem_ptr), enc_flags_ne_to_boolean(res) );
7264   ins_pipe( pipe_cmpxchg );
7265 %}
7266 
7267 instruct xaddI_no_res( memory mem, Universe dummy, immI add, eFlagsReg cr) %{
7268   predicate(n->as_LoadStore()->result_not_used());
7269   match(Set dummy (GetAndAddI mem add));
7270   effect(KILL cr);
7271   format %{ "ADDL  [$mem],$add" %}
7272   ins_encode %{
7273     if (os::is_MP()) { __ lock(); }
7274     __ addl($mem$$Address, $add$$constant);
7275   %}
7276   ins_pipe( pipe_cmpxchg );
7277 %}
7278 
7279 instruct xaddI( memory mem, rRegI newval, eFlagsReg cr) %{
7280   match(Set newval (GetAndAddI mem newval));
7281   effect(KILL cr);
7282   format %{ "XADDL  [$mem],$newval" %}
7283   ins_encode %{
7284     if (os::is_MP()) { __ lock(); }
7285     __ xaddl($mem$$Address, $newval$$Register);
7286   %}
7287   ins_pipe( pipe_cmpxchg );
7288 %}
7289 
7290 instruct xchgI( memory mem, rRegI newval) %{
7291   match(Set newval (GetAndSetI mem newval));
7292   format %{ "XCHGL  $newval,[$mem]" %}
7293   ins_encode %{
7294     __ xchgl($newval$$Register, $mem$$Address);
7295   %}
7296   ins_pipe( pipe_cmpxchg );
7297 %}
7298 
7299 instruct xchgP( memory mem, pRegP newval) %{
7300   match(Set newval (GetAndSetP mem newval));
7301   format %{ "XCHGL  $newval,[$mem]" %}
7302   ins_encode %{
7303     __ xchgl($newval$$Register, $mem$$Address);
7304   %}
7305   ins_pipe( pipe_cmpxchg );
7306 %}
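     // Note that XCHG with a memory operand locks the bus implicitly, so the
     // two instructions above need no explicit LOCK prefix, unlike the XADD
     // and ADD forms earlier which only emit one on MP systems.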
7307 
7308 //----------Subtraction Instructions-------------------------------------------
7309 
7310 // Integer Subtraction Instructions
7311 instruct subI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
7312   match(Set dst (SubI dst src));
7313   effect(KILL cr);
7314 
7315   size(2);
7316   format %{ "SUB    $dst,$src" %}
7317   opcode(0x2B);
7318   ins_encode( OpcP, RegReg( dst, src) );
7319   ins_pipe( ialu_reg_reg );
7320 %}
7321 
7322 instruct subI_eReg_imm(rRegI dst, immI src, eFlagsReg cr) %{
7323   match(Set dst (SubI dst src));
7324   effect(KILL cr);
7325 
7326   format %{ "SUB    $dst,$src" %}
7327   opcode(0x81,0x05);  /* Opcode 81 /5 */
7328   // ins_encode( RegImm( dst, src) );
7329   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
7330   ins_pipe( ialu_reg );
7331 %}
7332 
7333 instruct subI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
7334   match(Set dst (SubI dst (LoadI src)));
7335   effect(KILL cr);
7336 
7337   ins_cost(125);
7338   format %{ "SUB    $dst,$src" %}
7339   opcode(0x2B);
7340   ins_encode( OpcP, RegMem( dst, src) );
7341   ins_pipe( ialu_reg_mem );
7342 %}
7343 
7344 instruct subI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
7345   match(Set dst (StoreI dst (SubI (LoadI dst) src)));
7346   effect(KILL cr);
7347 
7348   ins_cost(150);
7349   format %{ "SUB    $dst,$src" %}
7350   opcode(0x29);  /* Opcode 29 /r */
7351   ins_encode( OpcP, RegMem( src, dst ) );
7352   ins_pipe( ialu_mem_reg );
7353 %}
7354 
7355 // Subtract from a pointer
7356 instruct subP_eReg(eRegP dst, rRegI src, immI0 zero, eFlagsReg cr) %{
7357   match(Set dst (AddP dst (SubI zero src)));
7358   effect(KILL cr);
7359 
7360   size(2);
7361   format %{ "SUB    $dst,$src" %}
7362   opcode(0x2B);
7363   ins_encode( OpcP, RegReg( dst, src) );
7364   ins_pipe( ialu_reg_reg );
7365 %}
7366 
7367 instruct negI_eReg(rRegI dst, immI0 zero, eFlagsReg cr) %{
7368   match(Set dst (SubI zero dst));
7369   effect(KILL cr);
7370 
7371   size(2);
7372   format %{ "NEG    $dst" %}
7373   opcode(0xF7,0x03);  // Opcode F7 /3
7374   ins_encode( OpcP, RegOpc( dst ) );
7375   ins_pipe( ialu_reg );
7376 %}
7377 
7378 //----------Multiplication/Division Instructions-------------------------------
7379 // Integer Multiplication Instructions
7380 // Multiply Register
7381 instruct mulI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
7382   match(Set dst (MulI dst src));
7383   effect(KILL cr);
7384 
7385   size(3);
7386   ins_cost(300);
7387   format %{ "IMUL   $dst,$src" %}
7388   opcode(0xAF, 0x0F);
7389   ins_encode( OpcS, OpcP, RegReg( dst, src) );
7390   ins_pipe( ialu_reg_reg_alu0 );
7391 %}
7392 
7393 // Multiply 32-bit Immediate
7394 instruct mulI_eReg_imm(rRegI dst, rRegI src, immI imm, eFlagsReg cr) %{
7395   match(Set dst (MulI src imm));
7396   effect(KILL cr);
7397 
7398   ins_cost(300);
7399   format %{ "IMUL   $dst,$src,$imm" %}
7400   opcode(0x69);  /* 69 /r id */
7401   ins_encode( OpcSE(imm), RegReg( dst, src ), Con8or32( imm ) );
7402   ins_pipe( ialu_reg_reg_alu0 );
7403 %}
7404 
7405 instruct loadConL_low_only(eADXRegL_low_only dst, immL32 src, eFlagsReg cr) %{
7406   match(Set dst src);
7407   effect(KILL cr);
7408 
7409   // Note that this is artificially increased to make it more expensive than loadConL
7410   ins_cost(250);
7411   format %{ "MOV    EAX,$src\t// low word only" %}
7412   opcode(0xB8);
7413   ins_encode( LdImmL_Lo(dst, src) );
7414   ins_pipe( ialu_reg_fat );
7415 %}
7416 
7417 // Multiply by 32-bit Immediate, taking the shifted high order results
7418 //  (special case for shift by 32)
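     // The pattern below is (int)(((long)src1 * con) >> 32) with con known
     // to fit in 32 bits, i.e. the multiply-high idiom that strength-reduced
     // constant divisions typically produce; IMUL leaves the full 64-bit
     // product in EDX:EAX and EDX is the wanted high word.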
7419 instruct mulI_imm_high(eDXRegI dst, nadxRegI src1, eADXRegL_low_only src2, immI_32 cnt, eFlagsReg cr) %{
7420   match(Set dst (ConvL2I (RShiftL (MulL (ConvI2L src1) src2) cnt)));
7421   predicate( _kids[0]->_kids[0]->_kids[1]->_leaf->Opcode() == Op_ConL &&
7422              _kids[0]->_kids[0]->_kids[1]->_leaf->as_Type()->type()->is_long()->get_con() >= min_jint &&
7423              _kids[0]->_kids[0]->_kids[1]->_leaf->as_Type()->type()->is_long()->get_con() <= max_jint );
7424   effect(USE src1, KILL cr);
7425 
7426   // Note that this is adjusted by 150 to compensate for the overcosting of loadConL_low_only
7427   ins_cost(0*100 + 1*400 - 150);
7428   format %{ "IMUL   EDX:EAX,$src1" %}
7429   ins_encode( multiply_con_and_shift_high( dst, src1, src2, cnt, cr ) );
7430   ins_pipe( pipe_slow );
7431 %}
7432 
7433 // Multiply by 32-bit Immediate, taking the shifted high order results
7434 instruct mulI_imm_RShift_high(eDXRegI dst, nadxRegI src1, eADXRegL_low_only src2, immI_32_63 cnt, eFlagsReg cr) %{
7435   match(Set dst (ConvL2I (RShiftL (MulL (ConvI2L src1) src2) cnt)));
7436   predicate( _kids[0]->_kids[0]->_kids[1]->_leaf->Opcode() == Op_ConL &&
7437              _kids[0]->_kids[0]->_kids[1]->_leaf->as_Type()->type()->is_long()->get_con() >= min_jint &&
7438              _kids[0]->_kids[0]->_kids[1]->_leaf->as_Type()->type()->is_long()->get_con() <= max_jint );
7439   effect(USE src1, KILL cr);
7440 
7441   // Note that this is adjusted by 150 to compensate for the overcosting of loadConL_low_only
7442   ins_cost(1*100 + 1*400 - 150);
7443   format %{ "IMUL   EDX:EAX,$src1\n\t"
7444             "SAR    EDX,$cnt-32" %}
7445   ins_encode( multiply_con_and_shift_high( dst, src1, src2, cnt, cr ) );
7446   ins_pipe( pipe_slow );
7447 %}
7448 
7449 // Multiply Memory 32-bit Immediate
7450 instruct mulI_mem_imm(rRegI dst, memory src, immI imm, eFlagsReg cr) %{
7451   match(Set dst (MulI (LoadI src) imm));
7452   effect(KILL cr);
7453 
7454   ins_cost(300);
7455   format %{ "IMUL   $dst,$src,$imm" %}
7456   opcode(0x69);  /* 69 /r id */
7457   ins_encode( OpcSE(imm), RegMem( dst, src ), Con8or32( imm ) );
7458   ins_pipe( ialu_reg_mem_alu0 );
7459 %}
7460 
7461 // Multiply Memory
7462 instruct mulI(rRegI dst, memory src, eFlagsReg cr) %{
7463   match(Set dst (MulI dst (LoadI src)));
7464   effect(KILL cr);
7465 
7466   ins_cost(350);
7467   format %{ "IMUL   $dst,$src" %}
7468   opcode(0xAF, 0x0F);
7469   ins_encode( OpcS, OpcP, RegMem( dst, src) );
7470   ins_pipe( ialu_reg_mem_alu0 );
7471 %}
7472 
7473 // Multiply Register Int to Long
7474 instruct mulI2L(eADXRegL dst, eAXRegI src, nadxRegI src1, eFlagsReg flags) %{
7475   // Basic Idea: long = (long)int * (long)int
7476   match(Set dst (MulL (ConvI2L src) (ConvI2L src1)));
7477   effect(DEF dst, USE src, USE src1, KILL flags);
7478 
7479   ins_cost(300);
7480   format %{ "IMUL   $dst,$src1" %}
7481 
7482   ins_encode( long_int_multiply( dst, src1 ) );
7483   ins_pipe( ialu_reg_reg_alu0 );
7484 %}
7485 
7486 instruct mulIS_eReg(eADXRegL dst, immL_32bits mask, eFlagsReg flags, eAXRegI src, nadxRegI src1) %{
7487   // Basic Idea:  long = (int & 0xffffffffL) * (int & 0xffffffffL)
7488   match(Set dst (MulL (AndL (ConvI2L src) mask) (AndL (ConvI2L src1) mask)));
7489   effect(KILL flags);
7490 
7491   ins_cost(300);
7492   format %{ "MUL    $dst,$src1" %}
7493 
7494   ins_encode( long_uint_multiply(dst, src1) );
7495   ins_pipe( ialu_reg_reg_alu0 );
7496 %}
7497 
7498 // Multiply Register Long
7499 instruct mulL_eReg(eADXRegL dst, eRegL src, rRegI tmp, eFlagsReg cr) %{
7500   match(Set dst (MulL dst src));
7501   effect(KILL cr, TEMP tmp);
7502   ins_cost(4*100+3*400);
7503 // Basic idea: lo(result) = lo(x_lo * y_lo)
7504 //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
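     // This follows from expanding (x_hi*2^32 + x_lo) * (y_hi*2^32 + y_lo):
     //   x_lo*y_lo + 2^32*(x_hi*y_lo + x_lo*y_hi) + 2^64*(x_hi*y_hi).
     // The 2^64 term lies beyond a 64-bit result, so only the low halves of
     // the two cross products need to be added into the high word of the
     // 32x32->64 MUL.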
7505   format %{ "MOV    $tmp,$src.lo\n\t"
7506             "IMUL   $tmp,EDX\n\t"
7507             "MOV    EDX,$src.hi\n\t"
7508             "IMUL   EDX,EAX\n\t"
7509             "ADD    $tmp,EDX\n\t"
7510             "MUL    EDX:EAX,$src.lo\n\t"
7511             "ADD    EDX,$tmp" %}
7512   ins_encode( long_multiply( dst, src, tmp ) );
7513   ins_pipe( pipe_slow );
7514 %}
7515 
7516 // Multiply Register Long where the left operand's high 32 bits are zero
7517 instruct mulL_eReg_lhi0(eADXRegL dst, eRegL src, rRegI tmp, eFlagsReg cr) %{
7518   predicate(is_operand_hi32_zero(n->in(1)));
7519   match(Set dst (MulL dst src));
7520   effect(KILL cr, TEMP tmp);
7521   ins_cost(2*100+2*400);
7522 // Basic idea: lo(result) = lo(x_lo * y_lo)
7523 //             hi(result) = hi(x_lo * y_lo) + lo(x_lo * y_hi) where lo(x_hi * y_lo) = 0 because x_hi = 0
7524   format %{ "MOV    $tmp,$src.hi\n\t"
7525             "IMUL   $tmp,EAX\n\t"
7526             "MUL    EDX:EAX,$src.lo\n\t"
7527             "ADD    EDX,$tmp" %}
7528   ins_encode %{
7529     __ movl($tmp$$Register, HIGH_FROM_LOW($src$$Register));
7530     __ imull($tmp$$Register, rax);
7531     __ mull($src$$Register);
7532     __ addl(rdx, $tmp$$Register);
7533   %}
7534   ins_pipe( pipe_slow );
7535 %}
7536 
7537 // Multiply Register Long where the right operand's high 32 bits are zero
7538 instruct mulL_eReg_rhi0(eADXRegL dst, eRegL src, rRegI tmp, eFlagsReg cr) %{
7539   predicate(is_operand_hi32_zero(n->in(2)));
7540   match(Set dst (MulL dst src));
7541   effect(KILL cr, TEMP tmp);
7542   ins_cost(2*100+2*400);
7543 // Basic idea: lo(result) = lo(x_lo * y_lo)
7544 //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) where lo(x_lo * y_hi) = 0 because y_hi = 0
7545   format %{ "MOV    $tmp,$src.lo\n\t"
7546             "IMUL   $tmp,EDX\n\t"
7547             "MUL    EDX:EAX,$src.lo\n\t"
7548             "ADD    EDX,$tmp" %}
7549   ins_encode %{
7550     __ movl($tmp$$Register, $src$$Register);
7551     __ imull($tmp$$Register, rdx);
7552     __ mull($src$$Register);
7553     __ addl(rdx, $tmp$$Register);
7554   %}
7555   ins_pipe( pipe_slow );
7556 %}
7557 
7558 // Multiply Register Long where the left and the right operands' high 32 bits are zero
7559 instruct mulL_eReg_hi0(eADXRegL dst, eRegL src, eFlagsReg cr) %{
7560   predicate(is_operand_hi32_zero(n->in(1)) && is_operand_hi32_zero(n->in(2)));
7561   match(Set dst (MulL dst src));
7562   effect(KILL cr);
7563   ins_cost(1*400);
7564 // Basic idea: lo(result) = lo(x_lo * y_lo)
7565 //             hi(result) = hi(x_lo * y_lo) where lo(x_hi * y_lo) = 0 and lo(x_lo * y_hi) = 0 because x_hi = 0 and y_hi = 0
7566   format %{ "MUL    EDX:EAX,$src.lo\n\t" %}
7567   ins_encode %{
7568     __ mull($src$$Register);
7569   %}
7570   ins_pipe( pipe_slow );
7571 %}
7572 
7573 // Multiply Register Long by small constant
7574 instruct mulL_eReg_con(eADXRegL dst, immL_127 src, rRegI tmp, eFlagsReg cr) %{
7575   match(Set dst (MulL dst src));
7576   effect(KILL cr, TEMP tmp);
7577   ins_cost(2*100+2*400);
7578   size(12);
7579 // Basic idea: lo(result) = lo(src * EAX)
7580 //             hi(result) = hi(src * EAX) + lo(src * EDX)
7581   format %{ "IMUL   $tmp,EDX,$src\n\t"
7582             "MOV    EDX,$src\n\t"
7583             "MUL    EDX\t# EDX*EAX -> EDX:EAX\n\t"
7584             "ADD    EDX,$tmp" %}
7585   ins_encode( long_multiply_con( dst, src, tmp ) );
7586   ins_pipe( pipe_slow );
7587 %}
7588 
7589 // Integer DIV with Register
7590 instruct divI_eReg(eAXRegI rax, eDXRegI rdx, eCXRegI div, eFlagsReg cr) %{
7591   match(Set rax (DivI rax div));
7592   effect(KILL rdx, KILL cr);
7593   size(26);
7594   ins_cost(30*100+10*100);
7595   format %{ "CMP    EAX,0x80000000\n\t"
7596             "JNE,s  normal\n\t"
7597             "XOR    EDX,EDX\n\t"
7598             "CMP    ECX,-1\n\t"
7599             "JE,s   done\n"
7600     "normal: CDQ\n\t"
7601             "IDIV   $div\n\t"
7602     "done:"        %}
7603   opcode(0xF7, 0x7);  /* Opcode F7 /7 */
7604   ins_encode( cdq_enc, OpcP, RegOpc(div) );
7605   ins_pipe( ialu_reg_reg_alu0 );
7606 %}
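     // The compare against 0x80000000 above guards the one case where IDIV
     // would fault: min_jint / -1 overflows the 32-bit quotient and raises
     // #DE.  Java requires that division to wrap (quotient min_jint,
     // remainder 0), so that path zeroes EDX and leaves EAX untouched.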
7607 
7608 // Divide Register Long
7609 instruct divL_eReg( eADXRegL dst, eRegL src1, eRegL src2, eFlagsReg cr, eCXRegI cx, eBXRegI bx ) %{
7610   match(Set dst (DivL src1 src2));
7611   effect( KILL cr, KILL cx, KILL bx );
7612   ins_cost(10000);
7613   format %{ "PUSH   $src1.hi\n\t"
7614             "PUSH   $src1.lo\n\t"
7615             "PUSH   $src2.hi\n\t"
7616             "PUSH   $src2.lo\n\t"
7617             "CALL   SharedRuntime::ldiv\n\t"
7618             "ADD    ESP,16" %}
7619   ins_encode( long_div(src1,src2) );
7620   ins_pipe( pipe_slow );
7621 %}
7622 
7623 // Integer DIVMOD with Register, both quotient and mod results
7624 instruct divModI_eReg_divmod(eAXRegI rax, eDXRegI rdx, eCXRegI div, eFlagsReg cr) %{
7625   match(DivModI rax div);
7626   effect(KILL cr);
7627   size(26);
7628   ins_cost(30*100+10*100);
7629   format %{ "CMP    EAX,0x80000000\n\t"
7630             "JNE,s  normal\n\t"
7631             "XOR    EDX,EDX\n\t"
7632             "CMP    ECX,-1\n\t"
7633             "JE,s   done\n"
7634     "normal: CDQ\n\t"
7635             "IDIV   $div\n\t"
7636     "done:"        %}
7637   opcode(0xF7, 0x7);  /* Opcode F7 /7 */
7638   ins_encode( cdq_enc, OpcP, RegOpc(div) );
7639   ins_pipe( pipe_slow );
7640 %}
7641 
7642 // Integer MOD with Register
7643 instruct modI_eReg(eDXRegI rdx, eAXRegI rax, eCXRegI div, eFlagsReg cr) %{
7644   match(Set rdx (ModI rax div));
7645   effect(KILL rax, KILL cr);
7646 
7647   size(26);
7648   ins_cost(300);
7649   format %{ "CDQ\n\t"
7650             "IDIV   $div" %}
7651   opcode(0xF7, 0x7);  /* Opcode F7 /7 */
7652   ins_encode( cdq_enc, OpcP, RegOpc(div) );
7653   ins_pipe( ialu_reg_reg_alu0 );
7654 %}
7655 
7656 // Remainder Register Long
7657 instruct modL_eReg( eADXRegL dst, eRegL src1, eRegL src2, eFlagsReg cr, eCXRegI cx, eBXRegI bx ) %{
7658   match(Set dst (ModL src1 src2));
7659   effect( KILL cr, KILL cx, KILL bx );
7660   ins_cost(10000);
7661   format %{ "PUSH   $src1.hi\n\t"
7662             "PUSH   $src1.lo\n\t"
7663             "PUSH   $src2.hi\n\t"
7664             "PUSH   $src2.lo\n\t"
7665             "CALL   SharedRuntime::lrem\n\t"
7666             "ADD    ESP,16" %}
7667   ins_encode( long_mod(src1,src2) );
7668   ins_pipe( pipe_slow );
7669 %}
7670 
7671 // Divide Register Long (no special case since divisor != -1)
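     // Sketch of the scheme used below: EDX:EAX is divided by abs($imm) with
     // at most two unsigned 32-bit DIVs.  If the high word is already below
     // the divisor a single DIV suffices (the "fast" path); otherwise the
     // high word is divided first and its remainder flows into the division
     // of the low word.  A negative dividend is negated before and the
     // quotient negated after, and a final negation accounts for a negative
     // $imm.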
7672 instruct divL_eReg_imm32( eADXRegL dst, immL32 imm, rRegI tmp, rRegI tmp2, eFlagsReg cr ) %{
7673   match(Set dst (DivL dst imm));
7674   effect( TEMP tmp, TEMP tmp2, KILL cr );
7675   ins_cost(1000);
7676   format %{ "MOV    $tmp,abs($imm) # ldiv EDX:EAX,$imm\n\t"
7677             "XOR    $tmp2,$tmp2\n\t"
7678             "CMP    $tmp,EDX\n\t"
7679             "JA,s   fast\n\t"
7680             "MOV    $tmp2,EAX\n\t"
7681             "MOV    EAX,EDX\n\t"
7682             "MOV    EDX,0\n\t"
7683             "JLE,s  pos\n\t"
7684             "LNEG   EAX : $tmp2\n\t"
7685             "DIV    $tmp # unsigned division\n\t"
7686             "XCHG   EAX,$tmp2\n\t"
7687             "DIV    $tmp\n\t"
7688             "LNEG   $tmp2 : EAX\n\t"
7689             "JMP,s  done\n"
7690     "pos:\n\t"
7691             "DIV    $tmp\n\t"
7692             "XCHG   EAX,$tmp2\n"
7693     "fast:\n\t"
7694             "DIV    $tmp\n"
7695     "done:\n\t"
7696             "MOV    EDX,$tmp2\n\t"
7697             "NEG    EDX:EAX # if $imm < 0" %}
7698   ins_encode %{
7699     int con = (int)$imm$$constant;
7700     assert(con != 0 && con != -1 && con != min_jint, "wrong divisor");
7701     int pcon = (con > 0) ? con : -con;
7702     Label Lfast, Lpos, Ldone;
7703 
7704     __ movl($tmp$$Register, pcon);
7705     __ xorl($tmp2$$Register,$tmp2$$Register);
7706     __ cmpl($tmp$$Register, HIGH_FROM_LOW($dst$$Register));
7707     __ jccb(Assembler::above, Lfast); // result fits into 32 bit
7708 
7709     __ movl($tmp2$$Register, $dst$$Register); // save
7710     __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
7711     __ movl(HIGH_FROM_LOW($dst$$Register),0); // preserve flags
7712     __ jccb(Assembler::lessEqual, Lpos); // result is positive
7713 
7714     // Negative dividend.
7715     // convert value to positive to use unsigned division
7716     __ lneg($dst$$Register, $tmp2$$Register);
7717     __ divl($tmp$$Register);
7718     __ xchgl($dst$$Register, $tmp2$$Register);
7719     __ divl($tmp$$Register);
7720     // revert result back to negative
7721     __ lneg($tmp2$$Register, $dst$$Register);
7722     __ jmpb(Ldone);
7723 
7724     __ bind(Lpos);
7725     __ divl($tmp$$Register); // Use unsigned division
7726     __ xchgl($dst$$Register, $tmp2$$Register);
7727     // Fall through to the final divide; tmp2 has the 32-bit hi result
7728 
7729     __ bind(Lfast);
7730     // fast path: src is positive
7731     __ divl($tmp$$Register); // Use unsigned division
7732 
7733     __ bind(Ldone);
7734     __ movl(HIGH_FROM_LOW($dst$$Register),$tmp2$$Register);
7735     if (con < 0) {
7736       __ lneg(HIGH_FROM_LOW($dst$$Register), $dst$$Register);
7737     }
7738   %}
7739   ins_pipe( pipe_slow );
7740 %}
7741 
7742 // Remainder Register Long (remainder fits into 32 bits)
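     // Same two-step unsigned division scheme as divL_eReg_imm32 above, but
     // only the remainder is kept; since a 32-bit divisor bounds it, the
     // remainder always fits in 32 bits and the trailing MOV/SAR pair just
     // sign-extends it into EDX:EAX.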
7743 instruct modL_eReg_imm32( eADXRegL dst, immL32 imm, rRegI tmp, rRegI tmp2, eFlagsReg cr ) %{
7744   match(Set dst (ModL dst imm));
7745   effect( TEMP tmp, TEMP tmp2, KILL cr );
7746   ins_cost(1000);
7747   format %{ "MOV    $tmp,abs($imm) # lrem EDX:EAX,$imm\n\t"
7748             "CMP    $tmp,EDX\n\t"
7749             "JA,s   fast\n\t"
7750             "MOV    $tmp2,EAX\n\t"
7751             "MOV    EAX,EDX\n\t"
7752             "MOV    EDX,0\n\t"
7753             "JLE,s  pos\n\t"
7754             "LNEG   EAX : $tmp2\n\t"
7755             "DIV    $tmp # unsigned division\n\t"
7756             "MOV    EAX,$tmp2\n\t"
7757             "DIV    $tmp\n\t"
7758             "NEG    EDX\n\t"
7759             "JMP,s  done\n"
7760     "pos:\n\t"
7761             "DIV    $tmp\n\t"
7762             "MOV    EAX,$tmp2\n"
7763     "fast:\n\t"
7764             "DIV    $tmp\n"
7765     "done:\n\t"
7766             "MOV    EAX,EDX\n\t"
7767             "SAR    EDX,31\n\t" %}
7768   ins_encode %{
7769     int con = (int)$imm$$constant;
7770     assert(con != 0 && con != -1 && con != min_jint, "wrong divisor");
7771     int pcon = (con > 0) ? con : -con;
7772     Label  Lfast, Lpos, Ldone;
7773 
7774     __ movl($tmp$$Register, pcon);
7775     __ cmpl($tmp$$Register, HIGH_FROM_LOW($dst$$Register));
7776     __ jccb(Assembler::above, Lfast); // src is positive and result fits into 32 bit
7777 
7778     __ movl($tmp2$$Register, $dst$$Register); // save
7779     __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
7780     __ movl(HIGH_FROM_LOW($dst$$Register),0); // preserve flags
7781     __ jccb(Assembler::lessEqual, Lpos); // result is positive
7782 
7783     // Negative dividend.
7784     // convert value to positive to use unsigned division
7785     __ lneg($dst$$Register, $tmp2$$Register);
7786     __ divl($tmp$$Register);
7787     __ movl($dst$$Register, $tmp2$$Register);
7788     __ divl($tmp$$Register);
7789     // revert remainder back to negative
7790     __ negl(HIGH_FROM_LOW($dst$$Register));
7791     __ jmpb(Ldone);
7792 
7793     __ bind(Lpos);
7794     __ divl($tmp$$Register);
7795     __ movl($dst$$Register, $tmp2$$Register);
7796 
7797     __ bind(Lfast);
7798     // fast path: src is positive
7799     __ divl($tmp$$Register);
7800 
7801     __ bind(Ldone);
7802     __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
7803     __ sarl(HIGH_FROM_LOW($dst$$Register), 31); // result sign
7804 
7805   %}
7806   ins_pipe( pipe_slow );
7807 %}
7808 
7809 // Integer Shift Instructions
7810 // Shift Left by one
7811 instruct shlI_eReg_1(rRegI dst, immI1 shift, eFlagsReg cr) %{
7812   match(Set dst (LShiftI dst shift));
7813   effect(KILL cr);
7814 
7815   size(2);
7816   format %{ "SHL    $dst,$shift" %}
7817   opcode(0xD1, 0x4);  /* D1 /4 */
7818   ins_encode( OpcP, RegOpc( dst ) );
7819   ins_pipe( ialu_reg );
7820 %}
7821 
7822 // Shift Left by 8-bit immediate
7823 instruct salI_eReg_imm(rRegI dst, immI8 shift, eFlagsReg cr) %{
7824   match(Set dst (LShiftI dst shift));
7825   effect(KILL cr);
7826 
7827   size(3);
7828   format %{ "SHL    $dst,$shift" %}
7829   opcode(0xC1, 0x4);  /* C1 /4 ib */
7830   ins_encode( RegOpcImm( dst, shift) );
7831   ins_pipe( ialu_reg );
7832 %}
7833 
7834 // Shift Left by variable
7835 instruct salI_eReg_CL(rRegI dst, eCXRegI shift, eFlagsReg cr) %{
7836   match(Set dst (LShiftI dst shift));
7837   effect(KILL cr);
7838 
7839   size(2);
7840   format %{ "SHL    $dst,$shift" %}
7841   opcode(0xD3, 0x4);  /* D3 /4 */
7842   ins_encode( OpcP, RegOpc( dst ) );
7843   ins_pipe( ialu_reg_reg );
7844 %}
7845 
7846 // Arithmetic shift right by one
7847 instruct sarI_eReg_1(rRegI dst, immI1 shift, eFlagsReg cr) %{
7848   match(Set dst (RShiftI dst shift));
7849   effect(KILL cr);
7850 
7851   size(2);
7852   format %{ "SAR    $dst,$shift" %}
7853   opcode(0xD1, 0x7);  /* D1 /7 */
7854   ins_encode( OpcP, RegOpc( dst ) );
7855   ins_pipe( ialu_reg );
7856 %}
7857 
7858 // Arithmetic shift right by one
7859 instruct sarI_mem_1(memory dst, immI1 shift, eFlagsReg cr) %{
7860   match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
7861   effect(KILL cr);
7862   format %{ "SAR    $dst,$shift" %}
7863   opcode(0xD1, 0x7);  /* D1 /7 */
7864   ins_encode( OpcP, RMopc_Mem(secondary,dst) );
7865   ins_pipe( ialu_mem_imm );
7866 %}
7867 
7868 // Arithmetic Shift Right by 8-bit immediate
7869 instruct sarI_eReg_imm(rRegI dst, immI8 shift, eFlagsReg cr) %{
7870   match(Set dst (RShiftI dst shift));
7871   effect(KILL cr);
7872 
7873   size(3);
7874   format %{ "SAR    $dst,$shift" %}
7875   opcode(0xC1, 0x7);  /* C1 /7 ib */
7876   ins_encode( RegOpcImm( dst, shift ) );
7877   ins_pipe( ialu_mem_imm );
7878 %}
7879 
7880 // Arithmetic Shift Right by 8-bit immediate
7881 instruct sarI_mem_imm(memory dst, immI8 shift, eFlagsReg cr) %{
7882   match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
7883   effect(KILL cr);
7884 
7885   format %{ "SAR    $dst,$shift" %}
7886   opcode(0xC1, 0x7);  /* C1 /7 ib */
7887   ins_encode( OpcP, RMopc_Mem(secondary, dst ), Con8or32( shift ) );
7888   ins_pipe( ialu_mem_imm );
7889 %}
7890 
7891 // Arithmetic Shift Right by variable
7892 instruct sarI_eReg_CL(rRegI dst, eCXRegI shift, eFlagsReg cr) %{
7893   match(Set dst (RShiftI dst shift));
7894   effect(KILL cr);
7895 
7896   size(2);
7897   format %{ "SAR    $dst,$shift" %}
7898   opcode(0xD3, 0x7);  /* D3 /7 */
7899   ins_encode( OpcP, RegOpc( dst ) );
7900   ins_pipe( ialu_reg_reg );
7901 %}
7902 
7903 // Logical shift right by one
7904 instruct shrI_eReg_1(rRegI dst, immI1 shift, eFlagsReg cr) %{
7905   match(Set dst (URShiftI dst shift));
7906   effect(KILL cr);
7907 
7908   size(2);
7909   format %{ "SHR    $dst,$shift" %}
7910   opcode(0xD1, 0x5);  /* D1 /5 */
7911   ins_encode( OpcP, RegOpc( dst ) );
7912   ins_pipe( ialu_reg );
7913 %}
7914 
7915 // Logical Shift Right by 8-bit immediate
7916 instruct shrI_eReg_imm(rRegI dst, immI8 shift, eFlagsReg cr) %{
7917   match(Set dst (URShiftI dst shift));
7918   effect(KILL cr);
7919 
7920   size(3);
7921   format %{ "SHR    $dst,$shift" %}
7922   opcode(0xC1, 0x5);  /* C1 /5 ib */
7923   ins_encode( RegOpcImm( dst, shift) );
7924   ins_pipe( ialu_reg );
7925 %}
7926 
7927 
7928 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
7929 // This idiom is used by the compiler for the i2b bytecode.
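     // For example, with src = 0x000001FF the left shift gives 0xFF000000 and the
     // arithmetic right shift then gives 0xFFFFFFFF (-1), i.e. the low byte
     // sign-extended, which is exactly what MOVSX (movsbl) does in one instruction.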
7930 instruct i2b(rRegI dst, xRegI src, immI_24 twentyfour) %{
7931   match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
7932 
7933   size(3);
7934   format %{ "MOVSX  $dst,$src :8" %}
7935   ins_encode %{
7936     __ movsbl($dst$$Register, $src$$Register);
7937   %}
7938   ins_pipe(ialu_reg_reg);
7939 %}
7940 
7941 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
7942 // This idiom is used by the compiler for the i2s bytecode.
7943 instruct i2s(rRegI dst, xRegI src, immI_16 sixteen) %{
7944   match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
7945 
7946   size(3);
7947   format %{ "MOVSX  $dst,$src :16" %}
7948   ins_encode %{
7949     __ movswl($dst$$Register, $src$$Register);
7950   %}
7951   ins_pipe(ialu_reg_reg);
7952 %}
7953 
7954 
7955 // Logical Shift Right by variable
7956 instruct shrI_eReg_CL(rRegI dst, eCXRegI shift, eFlagsReg cr) %{
7957   match(Set dst (URShiftI dst shift));
7958   effect(KILL cr);
7959 
7960   size(2);
7961   format %{ "SHR    $dst,$shift" %}
7962   opcode(0xD3, 0x5);  /* D3 /5 */
7963   ins_encode( OpcP, RegOpc( dst ) );
7964   ins_pipe( ialu_reg_reg );
7965 %}
7966 
7967 
7968 //----------Logical Instructions-----------------------------------------------
7969 //----------Integer Logical Instructions---------------------------------------
7970 // And Instructions
7971 // And Register with Register
7972 instruct andI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
7973   match(Set dst (AndI dst src));
7974   effect(KILL cr);
7975 
7976   size(2);
7977   format %{ "AND    $dst,$src" %}
7978   opcode(0x23);
7979   ins_encode( OpcP, RegReg( dst, src) );
7980   ins_pipe( ialu_reg_reg );
7981 %}
7982 
7983 // And Register with Immediate
7984 instruct andI_eReg_imm(rRegI dst, immI src, eFlagsReg cr) %{
7985   match(Set dst (AndI dst src));
7986   effect(KILL cr);
7987 
7988   format %{ "AND    $dst,$src" %}
7989   opcode(0x81,0x04);  /* Opcode 81 /4 */
7990   // ins_encode( RegImm( dst, src) );
7991   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
7992   ins_pipe( ialu_reg );
7993 %}
7994 
7995 // And Register with Memory
7996 instruct andI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
7997   match(Set dst (AndI dst (LoadI src)));
7998   effect(KILL cr);
7999 
8000   ins_cost(125);
8001   format %{ "AND    $dst,$src" %}
8002   opcode(0x23);
8003   ins_encode( OpcP, RegMem( dst, src) );
8004   ins_pipe( ialu_reg_mem );
8005 %}
8006 
8007 // And Memory with Register
8008 instruct andI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
8009   match(Set dst (StoreI dst (AndI (LoadI dst) src)));
8010   effect(KILL cr);
8011 
8012   ins_cost(150);
8013   format %{ "AND    $dst,$src" %}
8014   opcode(0x21);  /* Opcode 21 /r */
8015   ins_encode( OpcP, RegMem( src, dst ) );
8016   ins_pipe( ialu_mem_reg );
8017 %}
8018 
8019 // And Memory with Immediate
8020 instruct andI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
8021   match(Set dst (StoreI dst (AndI (LoadI dst) src)));
8022   effect(KILL cr);
8023 
8024   ins_cost(125);
8025   format %{ "AND    $dst,$src" %}
8026   opcode(0x81, 0x4);  /* Opcode 81 /4 id */
8027   // ins_encode( MemImm( dst, src) );
8028   ins_encode( OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32( src ) );
8029   ins_pipe( ialu_mem_imm );
8030 %}
8031 
8032 // Or Instructions
8033 // Or Register with Register
8034 instruct orI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
8035   match(Set dst (OrI dst src));
8036   effect(KILL cr);
8037 
8038   size(2);
8039   format %{ "OR     $dst,$src" %}
8040   opcode(0x0B);
8041   ins_encode( OpcP, RegReg( dst, src) );
8042   ins_pipe( ialu_reg_reg );
8043 %}
8044 
8045 instruct orI_eReg_castP2X(rRegI dst, eRegP src, eFlagsReg cr) %{
8046   match(Set dst (OrI dst (CastP2X src)));
8047   effect(KILL cr);
8048 
8049   size(2);
8050   format %{ "OR     $dst,$src" %}
8051   opcode(0x0B);
8052   ins_encode( OpcP, RegReg( dst, src) );
8053   ins_pipe( ialu_reg_reg );
8054 %}
8055 
8056 
8057 // Or Register with Immediate
8058 instruct orI_eReg_imm(rRegI dst, immI src, eFlagsReg cr) %{
8059   match(Set dst (OrI dst src));
8060   effect(KILL cr);
8061 
8062   format %{ "OR     $dst,$src" %}
8063   opcode(0x81,0x01);  /* Opcode 81 /1 id */
8064   // ins_encode( RegImm( dst, src) );
8065   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
8066   ins_pipe( ialu_reg );
8067 %}
8068 
8069 // Or Register with Memory
8070 instruct orI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
8071   match(Set dst (OrI dst (LoadI src)));
8072   effect(KILL cr);
8073 
8074   ins_cost(125);
8075   format %{ "OR     $dst,$src" %}
8076   opcode(0x0B);
8077   ins_encode( OpcP, RegMem( dst, src) );
8078   ins_pipe( ialu_reg_mem );
8079 %}
8080 
8081 // Or Memory with Register
8082 instruct orI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
8083   match(Set dst (StoreI dst (OrI (LoadI dst) src)));
8084   effect(KILL cr);
8085 
8086   ins_cost(150);
8087   format %{ "OR     $dst,$src" %}
8088   opcode(0x09);  /* Opcode 09 /r */
8089   ins_encode( OpcP, RegMem( src, dst ) );
8090   ins_pipe( ialu_mem_reg );
8091 %}
8092 
8093 // Or Memory with Immediate
8094 instruct orI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
8095   match(Set dst (StoreI dst (OrI (LoadI dst) src)));
8096   effect(KILL cr);
8097 
8098   ins_cost(125);
8099   format %{ "OR     $dst,$src" %}
8100   opcode(0x81,0x1);  /* Opcode 81 /1 id */
8101   // ins_encode( MemImm( dst, src) );
8102   ins_encode( OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32( src ) );
8103   ins_pipe( ialu_mem_imm );
8104 %}
8105 
8106 // ROL/ROR
8107 // ROL expand
8108 instruct rolI_eReg_imm1(rRegI dst, immI1 shift, eFlagsReg cr) %{
8109   effect(USE_DEF dst, USE shift, KILL cr);
8110 
8111   format %{ "ROL    $dst, $shift" %}
8112   opcode(0xD1, 0x0); /* Opcode D1 /0 */
8113   ins_encode( OpcP, RegOpc( dst ));
8114   ins_pipe( ialu_reg );
8115 %}
8116 
8117 instruct rolI_eReg_imm8(rRegI dst, immI8 shift, eFlagsReg cr) %{
8118   effect(USE_DEF dst, USE shift, KILL cr);
8119 
8120   format %{ "ROL    $dst, $shift" %}
8121   opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
8122   ins_encode( RegOpcImm(dst, shift) );
8123   ins_pipe(ialu_reg);
8124 %}
8125 
8126 instruct rolI_eReg_CL(ncxRegI dst, eCXRegI shift, eFlagsReg cr) %{
8127   effect(USE_DEF dst, USE shift, KILL cr);
8128 
8129   format %{ "ROL    $dst, $shift" %}
8130   opcode(0xD3, 0x0);    /* Opcode D3 /0 */
8131   ins_encode(OpcP, RegOpc(dst));
8132   ins_pipe( ialu_reg_reg );
8133 %}
8134 // end of ROL expand
8135 
8136 // ROL 32bit by one once
8137 instruct rolI_eReg_i1(rRegI dst, immI1 lshift, immI_M1 rshift, eFlagsReg cr) %{
8138   match(Set dst ( OrI (LShiftI dst lshift) (URShiftI dst rshift)));
8139 
8140   expand %{
8141     rolI_eReg_imm1(dst, lshift, cr);
8142   %}
8143 %}
8144 
8145 // ROL 32bit var by imm8 once
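     // The predicate below accepts only shift-count pairs that sum to 0 mod 32,
     // so the matched (x << n) | (x >>> (32 - n)) is exactly a rotate-left by n.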
8146 instruct rolI_eReg_i8(rRegI dst, immI8 lshift, immI8 rshift, eFlagsReg cr) %{
8147   predicate(  0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
8148   match(Set dst ( OrI (LShiftI dst lshift) (URShiftI dst rshift)));
8149 
8150   expand %{
8151     rolI_eReg_imm8(dst, lshift, cr);
8152   %}
8153 %}
8154 
8155 // ROL 32bit var by var once
8156 instruct rolI_eReg_Var_C0(ncxRegI dst, eCXRegI shift, immI0 zero, eFlagsReg cr) %{
8157   match(Set dst ( OrI (LShiftI dst shift) (URShiftI dst (SubI zero shift))));
8158 
8159   expand %{
8160     rolI_eReg_CL(dst, shift, cr);
8161   %}
8162 %}
8163 
8164 // ROL 32bit var by var once
8165 instruct rolI_eReg_Var_C32(ncxRegI dst, eCXRegI shift, immI_32 c32, eFlagsReg cr) %{
8166   match(Set dst ( OrI (LShiftI dst shift) (URShiftI dst (SubI c32 shift))));
8167 
8168   expand %{
8169     rolI_eReg_CL(dst, shift, cr);
8170   %}
8171 %}
8172 
8173 // ROR expand
8174 instruct rorI_eReg_imm1(rRegI dst, immI1 shift, eFlagsReg cr) %{
8175   effect(USE_DEF dst, USE shift, KILL cr);
8176 
8177   format %{ "ROR    $dst, $shift" %}
8178   opcode(0xD1,0x1);  /* Opcode D1 /1 */
8179   ins_encode( OpcP, RegOpc( dst ) );
8180   ins_pipe( ialu_reg );
8181 %}
8182 
8183 instruct rorI_eReg_imm8(rRegI dst, immI8 shift, eFlagsReg cr) %{
8184   effect (USE_DEF dst, USE shift, KILL cr);
8185 
8186   format %{ "ROR    $dst, $shift" %}
8187   opcode(0xC1, 0x1); /* Opcode C1 /1 ib */
8188   ins_encode( RegOpcImm(dst, shift) );
8189   ins_pipe( ialu_reg );
8190 %}
8191 
8192 instruct rorI_eReg_CL(ncxRegI dst, eCXRegI shift, eFlagsReg cr)%{
8193   effect(USE_DEF dst, USE shift, KILL cr);
8194 
8195   format %{ "ROR    $dst, $shift" %}
8196   opcode(0xD3, 0x1);    /* Opcode D3 /1 */
8197   ins_encode(OpcP, RegOpc(dst));
8198   ins_pipe( ialu_reg_reg );
8199 %}
8200 // end of ROR expand
8201 
8202 // ROR right once
8203 instruct rorI_eReg_i1(rRegI dst, immI1 rshift, immI_M1 lshift, eFlagsReg cr) %{
8204   match(Set dst ( OrI (URShiftI dst rshift) (LShiftI dst lshift)));
8205 
8206   expand %{
8207     rorI_eReg_imm1(dst, rshift, cr);
8208   %}
8209 %}
8210 
8211 // ROR 32bit by immI8 once
8212 instruct rorI_eReg_i8(rRegI dst, immI8 rshift, immI8 lshift, eFlagsReg cr) %{
8213   predicate(  0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
8214   match(Set dst ( OrI (URShiftI dst rshift) (LShiftI dst lshift)));
8215 
8216   expand %{
8217     rorI_eReg_imm8(dst, rshift, cr);
8218   %}
8219 %}
8220 
8221 // ROR 32bit var by var once
8222 instruct rorI_eReg_Var_C0(ncxRegI dst, eCXRegI shift, immI0 zero, eFlagsReg cr) %{
8223   match(Set dst ( OrI (URShiftI dst shift) (LShiftI dst (SubI zero shift))));
8224 
8225   expand %{
8226     rorI_eReg_CL(dst, shift, cr);
8227   %}
8228 %}
8229 
8230 // ROR 32bit var by var once
8231 instruct rorI_eReg_Var_C32(ncxRegI dst, eCXRegI shift, immI_32 c32, eFlagsReg cr) %{
8232   match(Set dst ( OrI (URShiftI dst shift) (LShiftI dst (SubI c32 shift))));
8233 
8234   expand %{
8235     rorI_eReg_CL(dst, shift, cr);
8236   %}
8237 %}
8238 
8239 // Xor Instructions
8240 // Xor Register with Register
8241 instruct xorI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
8242   match(Set dst (XorI dst src));
8243   effect(KILL cr);
8244 
8245   size(2);
8246   format %{ "XOR    $dst,$src" %}
8247   opcode(0x33);
8248   ins_encode( OpcP, RegReg( dst, src) );
8249   ins_pipe( ialu_reg_reg );
8250 %}
8251 
8252 // Xor Register with Immediate -1
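     // Since x ^ -1 == ~x, the pattern reduces to a single NOT, which also
     // leaves the flags untouched (hence no KILL cr).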
8253 instruct xorI_eReg_im1(rRegI dst, immI_M1 imm) %{
8254   match(Set dst (XorI dst imm));  
8255 
8256   size(2);
8257   format %{ "NOT    $dst" %}  
8258   ins_encode %{
8259      __ notl($dst$$Register);
8260   %}
8261   ins_pipe( ialu_reg );
8262 %}
8263 
8264 // Xor Register with Immediate
8265 instruct xorI_eReg_imm(rRegI dst, immI src, eFlagsReg cr) %{
8266   match(Set dst (XorI dst src));
8267   effect(KILL cr);
8268 
8269   format %{ "XOR    $dst,$src" %}
8270   opcode(0x81,0x06);  /* Opcode 81 /6 id */
8271   // ins_encode( RegImm( dst, src) );
8272   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
8273   ins_pipe( ialu_reg );
8274 %}
8275 
8276 // Xor Register with Memory
8277 instruct xorI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
8278   match(Set dst (XorI dst (LoadI src)));
8279   effect(KILL cr);
8280 
8281   ins_cost(125);
8282   format %{ "XOR    $dst,$src" %}
8283   opcode(0x33);
8284   ins_encode( OpcP, RegMem(dst, src) );
8285   ins_pipe( ialu_reg_mem );
8286 %}
8287 
8288 // Xor Memory with Register
8289 instruct xorI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
8290   match(Set dst (StoreI dst (XorI (LoadI dst) src)));
8291   effect(KILL cr);
8292 
8293   ins_cost(150);
8294   format %{ "XOR    $dst,$src" %}
8295   opcode(0x31);  /* Opcode 31 /r */
8296   ins_encode( OpcP, RegMem( src, dst ) );
8297   ins_pipe( ialu_mem_reg );
8298 %}
8299 
8300 // Xor Memory with Immediate
8301 instruct xorI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
8302   match(Set dst (StoreI dst (XorI (LoadI dst) src)));
8303   effect(KILL cr);
8304 
8305   ins_cost(125);
8306   format %{ "XOR    $dst,$src" %}
8307   opcode(0x81,0x6);  /* Opcode 81 /6 id */
8308   ins_encode( OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32( src ) );
8309   ins_pipe( ialu_mem_imm );
8310 %}
8311 
8312 //----------Convert Int to Boolean---------------------------------------------
8313 
8314 instruct movI_nocopy(rRegI dst, rRegI src) %{
8315   effect( DEF dst, USE src );
8316   format %{ "MOV    $dst,$src" %}
8317   ins_encode( enc_Copy( dst, src) );
8318   ins_pipe( ialu_reg_reg );
8319 %}
8320 
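     // ci2b reduces an arbitrary int to 0 or 1: with dst holding a copy of src,
     // NEG dst sets CF to (src != 0), and ADC dst,src computes -src + src + CF,
     // leaving exactly CF in dst.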
8321 instruct ci2b( rRegI dst, rRegI src, eFlagsReg cr ) %{
8322   effect( USE_DEF dst, USE src, KILL cr );
8323 
8324   size(4);
8325   format %{ "NEG    $dst\n\t"
8326             "ADC    $dst,$src" %}
8327   ins_encode( neg_reg(dst),
8328               OpcRegReg(0x13,dst,src) );
8329   ins_pipe( ialu_reg_reg_long );
8330 %}
8331 
8332 instruct convI2B( rRegI dst, rRegI src, eFlagsReg cr ) %{
8333   match(Set dst (Conv2B src));
8334 
8335   expand %{
8336     movI_nocopy(dst,src);
8337     ci2b(dst,src,cr);
8338   %}
8339 %}
8340 
8341 instruct movP_nocopy(rRegI dst, eRegP src) %{
8342   effect( DEF dst, USE src );
8343   format %{ "MOV    $dst,$src" %}
8344   ins_encode( enc_Copy( dst, src) );
8345   ins_pipe( ialu_reg_reg );
8346 %}
8347 
8348 instruct cp2b( rRegI dst, eRegP src, eFlagsReg cr ) %{
8349   effect( USE_DEF dst, USE src, KILL cr );
8350   format %{ "NEG    $dst\n\t"
8351             "ADC    $dst,$src" %}
8352   ins_encode( neg_reg(dst),
8353               OpcRegReg(0x13,dst,src) );
8354   ins_pipe( ialu_reg_reg_long );
8355 %}
8356 
8357 instruct convP2B( rRegI dst, eRegP src, eFlagsReg cr ) %{
8358   match(Set dst (Conv2B src));
8359 
8360   expand %{
8361     movP_nocopy(dst,src);
8362     cp2b(dst,src,cr);
8363   %}
8364 %}
8365 
8366 instruct cmpLTMask(eCXRegI dst, ncxRegI p, ncxRegI q, eFlagsReg cr) %{
8367   match(Set dst (CmpLTMask p q));
8368   effect(KILL cr);
8369   ins_cost(400);
8370 
8371   // SETlt can only use the low byte of EAX, EBX, ECX, or EDX as destination
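       // The sequence materializes (p < q) ? -1 : 0: XOR pre-clears dst, SETlt
       // writes 0 or 1 into its low byte, and NEG maps 1 to -1 and keeps 0.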
8372   format %{ "XOR    $dst,$dst\n\t"
8373             "CMP    $p,$q\n\t"
8374             "SETlt  $dst\n\t"
8375             "NEG    $dst" %}
8376   ins_encode %{
8377     Register Rp = $p$$Register;
8378     Register Rq = $q$$Register;
8379     Register Rd = $dst$$Register;
8381     __ xorl(Rd, Rd);
8382     __ cmpl(Rp, Rq);
8383     __ setb(Assembler::less, Rd);
8384     __ negl(Rd);
8385   %}
8386 
8387   ins_pipe(pipe_slow);
8388 %}
8389 
8390 instruct cmpLTMask0(rRegI dst, immI0 zero, eFlagsReg cr) %{
8391   match(Set dst (CmpLTMask dst zero));
8392   effect(DEF dst, KILL cr);
8393   ins_cost(100);
8394 
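       // (dst < 0) ? -1 : 0 is just the sign bit smeared across the word, so a
       // single arithmetic shift right by 31 suffices.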
8395   format %{ "SAR    $dst,31\t# cmpLTMask0" %}
8396   ins_encode %{
8397     __ sarl($dst$$Register, 31);
8398   %}
8399   ins_pipe(ialu_reg);
8400 %}
8401 
8402 /* better to save a register than avoid a branch */
8403 instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, eFlagsReg cr) %{
8404   match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
8405   effect(KILL cr);
8406   ins_cost(400);
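       // Implements p = (p - q) + ((p < q) ? y : 0) without materializing the
       // mask: subtract, then add $y back only if the result went negative.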
8407   format %{ "SUB    $p,$q\t# cadd_cmpLTMask\n\t"
8408             "JGE    done\n\t"
8409             "ADD    $p,$y\n"
8410             "done:  " %}
8411   ins_encode %{
8412     Register Rp = $p$$Register;
8413     Register Rq = $q$$Register;
8414     Register Ry = $y$$Register;
8415     Label done;
8416     __ subl(Rp, Rq);
8417     __ jccb(Assembler::greaterEqual, done);
8418     __ addl(Rp, Ry);
8419     __ bind(done);
8420   %}
8421 
8422   ins_pipe(pipe_cmplt);
8423 %}
8424 
8425 /* better to save a register than avoid a branch */
8426 instruct and_cmpLTMask(rRegI p, rRegI q, rRegI y, eFlagsReg cr) %{
8427   match(Set y (AndI (CmpLTMask p q) y));
8428   effect(KILL cr);
8429 
8430   ins_cost(300);
8431 
8432   format %{ "CMPL     $p, $q\t# and_cmpLTMask\n\t"
8433             "JLT      done\n\t"
8434             "XORL     $y, $y\n"
8435             "done:  " %}
8436   ins_encode %{
8437     Register Rp = $p$$Register;
8438     Register Rq = $q$$Register;
8439     Register Ry = $y$$Register;
8440     Label done;
8441     __ cmpl(Rp, Rq);
8442     __ jccb(Assembler::less, done);
8443     __ xorl(Ry, Ry);
8444     __ bind(done);
8445   %}
8446 
8447   ins_pipe(pipe_cmplt);
8448 %}
8449 
8450 /* If I enable this, I encourage spilling in the inner loop of compress.
8451 instruct cadd_cmpLTMask_mem(ncxRegI p, ncxRegI q, memory y, eCXRegI tmp, eFlagsReg cr) %{
8452   match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
8453 */
8454 //----------Overflow Math Instructions-----------------------------------------
8455 
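     // These rules produce only condition codes: the ADD/CMP/NEG/IMUL sets OF,
     // and the matcher pairs the flags result with a branch-on-overflow at the
     // use site (e.g. the Math.addExact/subtractExact/multiplyExact intrinsics).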
8456 instruct overflowAddI_eReg(eFlagsReg cr, eAXRegI op1, rRegI op2)
8457 %{
8458   match(Set cr (OverflowAddI op1 op2));
8459   effect(DEF cr, USE_KILL op1, USE op2);
8460 
8461   format %{ "ADD    $op1, $op2\t# overflow check int" %}
8462 
8463   ins_encode %{
8464     __ addl($op1$$Register, $op2$$Register);
8465   %}
8466   ins_pipe(ialu_reg_reg);
8467 %}
8468 
8469 instruct overflowAddI_rReg_imm(eFlagsReg cr, eAXRegI op1, immI op2)
8470 %{
8471   match(Set cr (OverflowAddI op1 op2));
8472   effect(DEF cr, USE_KILL op1, USE op2);
8473 
8474   format %{ "ADD    $op1, $op2\t# overflow check int" %}
8475 
8476   ins_encode %{
8477     __ addl($op1$$Register, $op2$$constant);
8478   %}
8479   ins_pipe(ialu_reg_reg);
8480 %}
8481 
8482 instruct overflowSubI_rReg(eFlagsReg cr, rRegI op1, rRegI op2)
8483 %{
8484   match(Set cr (OverflowSubI op1 op2));
8485 
8486   format %{ "CMP    $op1, $op2\t# overflow check int" %}
8487   ins_encode %{
8488     __ cmpl($op1$$Register, $op2$$Register);
8489   %}
8490   ins_pipe(ialu_reg_reg);
8491 %}
8492 
8493 instruct overflowSubI_rReg_imm(eFlagsReg cr, rRegI op1, immI op2)
8494 %{
8495   match(Set cr (OverflowSubI op1 op2));
8496 
8497   format %{ "CMP    $op1, $op2\t# overflow check int" %}
8498   ins_encode %{
8499     __ cmpl($op1$$Register, $op2$$constant);
8500   %}
8501   ins_pipe(ialu_reg_reg);
8502 %}
8503 
8504 instruct overflowNegI_rReg(eFlagsReg cr, immI0 zero, eAXRegI op2)
8505 %{
8506   match(Set cr (OverflowSubI zero op2));
8507   effect(DEF cr, USE_KILL op2);
8508 
8509   format %{ "NEG    $op2\t# overflow check int" %}
8510   ins_encode %{
8511     __ negl($op2$$Register);
8512   %}
8513   ins_pipe(ialu_reg_reg);
8514 %}
8515 
8516 instruct overflowMulI_rReg(eFlagsReg cr, eAXRegI op1, rRegI op2)
8517 %{
8518   match(Set cr (OverflowMulI op1 op2));
8519   effect(DEF cr, USE_KILL op1, USE op2);
8520 
8521   format %{ "IMUL    $op1, $op2\t# overflow check int" %}
8522   ins_encode %{
8523     __ imull($op1$$Register, $op2$$Register);
8524   %}
8525   ins_pipe(ialu_reg_reg_alu0);
8526 %}
8527 
8528 instruct overflowMulI_rReg_imm(eFlagsReg cr, rRegI op1, immI op2, rRegI tmp)
8529 %{
8530   match(Set cr (OverflowMulI op1 op2));
8531   effect(DEF cr, TEMP tmp, USE op1, USE op2);
8532 
8533   format %{ "IMUL    $tmp, $op1, $op2\t# overflow check int" %}
8534   ins_encode %{
8535     __ imull($tmp$$Register, $op1$$Register, $op2$$constant);
8536   %}
8537   ins_pipe(ialu_reg_reg_alu0);
8538 %}
8539 
8540 //----------Long Instructions------------------------------------------------
8541 // Add Long Register with Register
8542 instruct addL_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
8543   match(Set dst (AddL dst src));
8544   effect(KILL cr);
8545   ins_cost(200);
8546   format %{ "ADD    $dst.lo,$src.lo\n\t"
8547             "ADC    $dst.hi,$src.hi" %}
8548   opcode(0x03, 0x13);
8549   ins_encode( RegReg_Lo(dst, src), RegReg_Hi(dst,src) );
8550   ins_pipe( ialu_reg_reg_long );
8551 %}
8552 
8553 // Add Long Register with Immediate
8554 instruct addL_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{
8555   match(Set dst (AddL dst src));
8556   effect(KILL cr);
8557   format %{ "ADD    $dst.lo,$src.lo\n\t"
8558             "ADC    $dst.hi,$src.hi" %}
8559   opcode(0x81,0x00,0x02);  /* Opcode 81 /0, 81 /2 */
8560   ins_encode( Long_OpcSErm_Lo( dst, src ), Long_OpcSErm_Hi( dst, src ) );
8561   ins_pipe( ialu_reg_long );
8562 %}
8563 
8564 // Add Long Register with Memory
8565 instruct addL_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
8566   match(Set dst (AddL dst (LoadL mem)));
8567   effect(KILL cr);
8568   ins_cost(125);
8569   format %{ "ADD    $dst.lo,$mem\n\t"
8570             "ADC    $dst.hi,$mem+4" %}
8571   opcode(0x03, 0x13);
8572   ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
8573   ins_pipe( ialu_reg_long_mem );
8574 %}
8575 
8576 // Subtract Long Register with Register.
8577 instruct subL_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
8578   match(Set dst (SubL dst src));
8579   effect(KILL cr);
8580   ins_cost(200);
8581   format %{ "SUB    $dst.lo,$src.lo\n\t"
8582             "SBB    $dst.hi,$src.hi" %}
8583   opcode(0x2B, 0x1B);
8584   ins_encode( RegReg_Lo(dst, src), RegReg_Hi(dst,src) );
8585   ins_pipe( ialu_reg_reg_long );
8586 %}
8587 
8588 // Subtract Long Register with Immediate
8589 instruct subL_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{
8590   match(Set dst (SubL dst src));
8591   effect(KILL cr);
8592   format %{ "SUB    $dst.lo,$src.lo\n\t"
8593             "SBB    $dst.hi,$src.hi" %}
8594   opcode(0x81,0x05,0x03);  /* Opcode 81 /5, 81 /3 */
8595   ins_encode( Long_OpcSErm_Lo( dst, src ), Long_OpcSErm_Hi( dst, src ) );
8596   ins_pipe( ialu_reg_long );
8597 %}
8598 
8599 // Subtract Long Register with Memory
8600 instruct subL_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
8601   match(Set dst (SubL dst (LoadL mem)));
8602   effect(KILL cr);
8603   ins_cost(125);
8604   format %{ "SUB    $dst.lo,$mem\n\t"
8605             "SBB    $dst.hi,$mem+4" %}
8606   opcode(0x2B, 0x1B);
8607   ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
8608   ins_pipe( ialu_reg_long_mem );
8609 %}
8610 
8611 instruct negL_eReg(eRegL dst, immL0 zero, eFlagsReg cr) %{
8612   match(Set dst (SubL zero dst));
8613   effect(KILL cr);
8614   ins_cost(300);
8615   format %{ "NEG    $dst.hi\n\tNEG    $dst.lo\n\tSBB    $dst.hi,0" %}
8616   ins_encode( neg_long(dst) );
8617   ins_pipe( ialu_reg_reg_long );
8618 %}
8619 
8620 // And Long Register with Register
8621 instruct andL_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
8622   match(Set dst (AndL dst src));
8623   effect(KILL cr);
8624   format %{ "AND    $dst.lo,$src.lo\n\t"
8625             "AND    $dst.hi,$src.hi" %}
8626   opcode(0x23,0x23);
8627   ins_encode( RegReg_Lo( dst, src), RegReg_Hi( dst, src) );
8628   ins_pipe( ialu_reg_reg_long );
8629 %}
8630 
8631 // And Long Register with Immediate
8632 instruct andL_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{
8633   match(Set dst (AndL dst src));
8634   effect(KILL cr);
8635   format %{ "AND    $dst.lo,$src.lo\n\t"
8636             "AND    $dst.hi,$src.hi" %}
8637   opcode(0x81,0x04,0x04);  /* Opcode 81 /4, 81 /4 */
8638   ins_encode( Long_OpcSErm_Lo( dst, src ), Long_OpcSErm_Hi( dst, src ) );
8639   ins_pipe( ialu_reg_long );
8640 %}
8641 
8642 // And Long Register with Memory
8643 instruct andL_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
8644   match(Set dst (AndL dst (LoadL mem)));
8645   effect(KILL cr);
8646   ins_cost(125);
8647   format %{ "AND    $dst.lo,$mem\n\t"
8648             "AND    $dst.hi,$mem+4" %}
8649   opcode(0x23, 0x23);
8650   ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
8651   ins_pipe( ialu_reg_long_mem );
8652 %}
8653 
8654 // Or Long Register with Register
8655 instruct orl_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
8656   match(Set dst (OrL dst src));
8657   effect(KILL cr);
8658   format %{ "OR     $dst.lo,$src.lo\n\t"
8659             "OR     $dst.hi,$src.hi" %}
8660   opcode(0x0B,0x0B);
8661   ins_encode( RegReg_Lo( dst, src), RegReg_Hi( dst, src) );
8662   ins_pipe( ialu_reg_reg_long );
8663 %}
8664 
8665 // Or Long Register with Immediate
8666 instruct orl_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{
8667   match(Set dst (OrL dst src));
8668   effect(KILL cr);
8669   format %{ "OR     $dst.lo,$src.lo\n\t"
8670             "OR     $dst.hi,$src.hi" %}
8671   opcode(0x81,0x01,0x01);  /* Opcode 81 /1, 81 /1 */
8672   ins_encode( Long_OpcSErm_Lo( dst, src ), Long_OpcSErm_Hi( dst, src ) );
8673   ins_pipe( ialu_reg_long );
8674 %}
8675 
8676 // Or Long Register with Memory
8677 instruct orl_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
8678   match(Set dst (OrL dst (LoadL mem)));
8679   effect(KILL cr);
8680   ins_cost(125);
8681   format %{ "OR     $dst.lo,$mem\n\t"
8682             "OR     $dst.hi,$mem+4" %}
8683   opcode(0x0B,0x0B);
8684   ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
8685   ins_pipe( ialu_reg_long_mem );
8686 %}
8687 
8688 // Xor Long Register with Register
8689 instruct xorl_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
8690   match(Set dst (XorL dst src));
8691   effect(KILL cr);
8692   format %{ "XOR    $dst.lo,$src.lo\n\t"
8693             "XOR    $dst.hi,$src.hi" %}
8694   opcode(0x33,0x33);
8695   ins_encode( RegReg_Lo( dst, src), RegReg_Hi( dst, src) );
8696   ins_pipe( ialu_reg_reg_long );
8697 %}
8698 
8699 // Xor Long Register with Immediate -1
8700 instruct xorl_eReg_im1(eRegL dst, immL_M1 imm) %{
8701   match(Set dst (XorL dst imm));  
8702   format %{ "NOT    $dst.lo\n\t"
8703             "NOT    $dst.hi" %}
8704   ins_encode %{
8705      __ notl($dst$$Register);
8706      __ notl(HIGH_FROM_LOW($dst$$Register));
8707   %}
8708   ins_pipe( ialu_reg_long );
8709 %}
8710 
8711 // Xor Long Register with Immediate
8712 instruct xorl_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{
8713   match(Set dst (XorL dst src));
8714   effect(KILL cr);
8715   format %{ "XOR    $dst.lo,$src.lo\n\t"
8716             "XOR    $dst.hi,$src.hi" %}
8717   opcode(0x81,0x06,0x06);  /* Opcode 81 /6, 81 /6 */
8718   ins_encode( Long_OpcSErm_Lo( dst, src ), Long_OpcSErm_Hi( dst, src ) );
8719   ins_pipe( ialu_reg_long );
8720 %}
8721 
8722 // Xor Long Register with Memory
8723 instruct xorl_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
8724   match(Set dst (XorL dst (LoadL mem)));
8725   effect(KILL cr);
8726   ins_cost(125);
8727   format %{ "XOR    $dst.lo,$mem\n\t"
8728             "XOR    $dst.hi,$mem+4" %}
8729   opcode(0x33,0x33);
8730   ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
8731   ins_pipe( ialu_reg_long_mem );
8732 %}
8733 
8734 // Shift Left Long by 1
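     // Doubling via ADD/ADC: adding lo to itself and then hi to itself with
     // carry is a 64-bit left shift by one without needing SHLD.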
8735 instruct shlL_eReg_1(eRegL dst, immI_1 cnt, eFlagsReg cr) %{
8736   predicate(UseNewLongLShift);
8737   match(Set dst (LShiftL dst cnt));
8738   effect(KILL cr);
8739   ins_cost(100);
8740   format %{ "ADD    $dst.lo,$dst.lo\n\t"
8741             "ADC    $dst.hi,$dst.hi" %}
8742   ins_encode %{
8743     __ addl($dst$$Register,$dst$$Register);
8744     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
8745   %}
8746   ins_pipe( ialu_reg_long );
8747 %}
8748 
8749 // Shift Left Long by 2
8750 instruct shlL_eReg_2(eRegL dst, immI_2 cnt, eFlagsReg cr) %{
8751   predicate(UseNewLongLShift);
8752   match(Set dst (LShiftL dst cnt));
8753   effect(KILL cr);
8754   ins_cost(100);
8755   format %{ "ADD    $dst.lo,$dst.lo\n\t"
8756             "ADC    $dst.hi,$dst.hi\n\t" 
8757             "ADD    $dst.lo,$dst.lo\n\t"
8758             "ADC    $dst.hi,$dst.hi" %}
8759   ins_encode %{
8760     __ addl($dst$$Register,$dst$$Register);
8761     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
8762     __ addl($dst$$Register,$dst$$Register);
8763     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
8764   %}
8765   ins_pipe( ialu_reg_long );
8766 %}
8767 
8768 // Shift Left Long by 3
8769 instruct shlL_eReg_3(eRegL dst, immI_3 cnt, eFlagsReg cr) %{
8770   predicate(UseNewLongLShift);
8771   match(Set dst (LShiftL dst cnt));
8772   effect(KILL cr);
8773   ins_cost(100);
8774   format %{ "ADD    $dst.lo,$dst.lo\n\t"
8775             "ADC    $dst.hi,$dst.hi\n\t" 
8776             "ADD    $dst.lo,$dst.lo\n\t"
8777             "ADC    $dst.hi,$dst.hi\n\t" 
8778             "ADD    $dst.lo,$dst.lo\n\t"
8779             "ADC    $dst.hi,$dst.hi" %}
8780   ins_encode %{
8781     __ addl($dst$$Register,$dst$$Register);
8782     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
8783     __ addl($dst$$Register,$dst$$Register);
8784     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
8785     __ addl($dst$$Register,$dst$$Register);
8786     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
8787   %}
8788   ins_pipe( ialu_reg_long );
8789 %}
8790 
8791 // Shift Left Long by 1-31
8792 instruct shlL_eReg_1_31(eRegL dst, immI_1_31 cnt, eFlagsReg cr) %{
8793   match(Set dst (LShiftL dst cnt));
8794   effect(KILL cr);
8795   ins_cost(200);
8796   format %{ "SHLD   $dst.hi,$dst.lo,$cnt\n\t"
8797             "SHL    $dst.lo,$cnt" %}
8798   opcode(0xC1, 0x4, 0xA4);  /* 0F/A4, then C1 /4 ib */
8799   ins_encode( move_long_small_shift(dst,cnt) );
8800   ins_pipe( ialu_reg_long );
8801 %}
8802 
8803 // Shift Left Long by 32-63
8804 instruct shlL_eReg_32_63(eRegL dst, immI_32_63 cnt, eFlagsReg cr) %{
8805   match(Set dst (LShiftL dst cnt));
8806   effect(KILL cr);
8807   ins_cost(300);
8808   format %{ "MOV    $dst.hi,$dst.lo\n"
8809           "\tSHL    $dst.hi,$cnt-32\n"
8810           "\tXOR    $dst.lo,$dst.lo" %}
8811   opcode(0xC1, 0x4);  /* C1 /4 ib */
8812   ins_encode( move_long_big_shift_clr(dst,cnt) );
8813   ins_pipe( ialu_reg_long );
8814 %}
8815 
8816 // Shift Left Long by variable
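     // SHL/SHLD use only the low five bits of CL for 32-bit operands, so counts
     // of 32..63 are handled up front: TEST $shift,32 detects them and the words
     // are pre-moved (hi = lo, lo = 0) before the masked SHLD/SHL finish the job.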
8817 instruct salL_eReg_CL(eRegL dst, eCXRegI shift, eFlagsReg cr) %{
8818   match(Set dst (LShiftL dst shift));
8819   effect(KILL cr);
8820   ins_cost(500+200);
8821   size(17);
8822   format %{ "TEST   $shift,32\n\t"
8823             "JEQ,s  small\n\t"
8824             "MOV    $dst.hi,$dst.lo\n\t"
8825             "XOR    $dst.lo,$dst.lo\n"
8826     "small:\tSHLD   $dst.hi,$dst.lo,$shift\n\t"
8827             "SHL    $dst.lo,$shift" %}
8828   ins_encode( shift_left_long( dst, shift ) );
8829   ins_pipe( pipe_slow );
8830 %}
8831 
8832 // Shift Right Long by 1-31
8833 instruct shrL_eReg_1_31(eRegL dst, immI_1_31 cnt, eFlagsReg cr) %{
8834   match(Set dst (URShiftL dst cnt));
8835   effect(KILL cr);
8836   ins_cost(200);
8837   format %{ "SHRD   $dst.lo,$dst.hi,$cnt\n\t"
8838             "SHR    $dst.hi,$cnt" %}
8839   opcode(0xC1, 0x5, 0xAC);  /* 0F/AC, then C1 /5 ib */
8840   ins_encode( move_long_small_shift(dst,cnt) );
8841   ins_pipe( ialu_reg_long );
8842 %}
8843 
8844 // Shift Right Long by 32-63
8845 instruct shrL_eReg_32_63(eRegL dst, immI_32_63 cnt, eFlagsReg cr) %{
8846   match(Set dst (URShiftL dst cnt));
8847   effect(KILL cr);
8848   ins_cost(300);
8849   format %{ "MOV    $dst.lo,$dst.hi\n"
8850           "\tSHR    $dst.lo,$cnt-32\n"
8851           "\tXOR    $dst.hi,$dst.hi" %}
8852   opcode(0xC1, 0x5);  /* C1 /5 ib */
8853   ins_encode( move_long_big_shift_clr(dst,cnt) );
8854   ins_pipe( ialu_reg_long );
8855 %}
8856 
8857 // Shift Right Long by variable
8858 instruct shrL_eReg_CL(eRegL dst, eCXRegI shift, eFlagsReg cr) %{
8859   match(Set dst (URShiftL dst shift));
8860   effect(KILL cr);
8861   ins_cost(600);
8862   size(17);
8863   format %{ "TEST   $shift,32\n\t"
8864             "JEQ,s  small\n\t"
8865             "MOV    $dst.lo,$dst.hi\n\t"
8866             "XOR    $dst.hi,$dst.hi\n"
8867     "small:\tSHRD   $dst.lo,$dst.hi,$shift\n\t"
8868             "SHR    $dst.hi,$shift" %}
8869   ins_encode( shift_right_long( dst, shift ) );
8870   ins_pipe( pipe_slow );
8871 %}
8872 
8873 // Shift Right Long by 1-31
8874 instruct sarL_eReg_1_31(eRegL dst, immI_1_31 cnt, eFlagsReg cr) %{
8875   match(Set dst (RShiftL dst cnt));
8876   effect(KILL cr);
8877   ins_cost(200);
8878   format %{ "SHRD   $dst.lo,$dst.hi,$cnt\n\t"
8879             "SAR    $dst.hi,$cnt" %}
8880   opcode(0xC1, 0x7, 0xAC);  /* 0F/AC, then C1 /7 ib */
8881   ins_encode( move_long_small_shift(dst,cnt) );
8882   ins_pipe( ialu_reg_long );
8883 %}
8884 
8885 // Shift Right Long by 32-63
8886 instruct sarL_eReg_32_63( eRegL dst, immI_32_63 cnt, eFlagsReg cr) %{
8887   match(Set dst (RShiftL dst cnt));
8888   effect(KILL cr);
8889   ins_cost(300);
8890   format %{ "MOV    $dst.lo,$dst.hi\n"
8891           "\tSAR    $dst.lo,$cnt-32\n"
8892           "\tSAR    $dst.hi,31" %}
8893   opcode(0xC1, 0x7);  /* C1 /7 ib */
8894   ins_encode( move_long_big_shift_sign(dst,cnt) );
8895   ins_pipe( ialu_reg_long );
8896 %}
8897 
8898 // Shift Right arithmetic Long by variable
8899 instruct sarL_eReg_CL(eRegL dst, eCXRegI shift, eFlagsReg cr) %{
8900   match(Set dst (RShiftL dst shift));
8901   effect(KILL cr);
8902   ins_cost(600);
8903   size(18);
8904   format %{ "TEST   $shift,32\n\t"
8905             "JEQ,s  small\n\t"
8906             "MOV    $dst.lo,$dst.hi\n\t"
8907             "SAR    $dst.hi,31\n"
8908     "small:\tSHRD   $dst.lo,$dst.hi,$shift\n\t"
8909             "SAR    $dst.hi,$shift" %}
8910   ins_encode( shift_right_arith_long( dst, shift ) );
8911   ins_pipe( pipe_slow );
8912 %}
8913 
8914 
8915 //----------Double Instructions------------------------------------------------
8916 // Double Math
8917 
8918 // Compare & branch
8919 
8920 // P6 version of float compare, sets condition codes in EFLAGS
8921 instruct cmpDPR_cc_P6(eFlagsRegU cr, regDPR src1, regDPR src2, eAXRegI rax) %{
8922   predicate(VM_Version::supports_cmov() && UseSSE <=1);
8923   match(Set cr (CmpD src1 src2));
8924   effect(KILL rax);
8925   ins_cost(150);
8926   format %{ "FLD    $src1\n\t"
8927             "FUCOMIP ST,$src2  // P6 instruction\n\t"
8928             "JNP    exit\n\t"
8929             "MOV    ah,1       // saw a NaN, set CF\n\t"
8930             "SAHF\n"
8931      "exit:\tNOP               // avoid branch to branch" %}
8932   opcode(0xDF, 0x05); /* DF E8+i or DF /5 */
8933   ins_encode( Push_Reg_DPR(src1),
8934               OpcP, RegOpc(src2),
8935               cmpF_P6_fixup );
8936   ins_pipe( pipe_slow );
8937 %}
8938 
8939 instruct cmpDPR_cc_P6CF(eFlagsRegUCF cr, regDPR src1, regDPR src2) %{
8940   predicate(VM_Version::supports_cmov() && UseSSE <=1);
8941   match(Set cr (CmpD src1 src2));
8942   ins_cost(150);
8943   format %{ "FLD    $src1\n\t"
8944             "FUCOMIP ST,$src2  // P6 instruction" %}
8945   opcode(0xDF, 0x05); /* DF E8+i or DF /5 */
8946   ins_encode( Push_Reg_DPR(src1),
8947               OpcP, RegOpc(src2));
8948   ins_pipe( pipe_slow );
8949 %}
8950 
8951 // Compare & branch
8952 instruct cmpDPR_cc(eFlagsRegU cr, regDPR src1, regDPR src2, eAXRegI rax) %{
8953   predicate(UseSSE<=1);
8954   match(Set cr (CmpD src1 src2));
8955   effect(KILL rax);
8956   ins_cost(200);
8957   format %{ "FLD    $src1\n\t"
8958             "FCOMp  $src2\n\t"
8959             "FNSTSW AX\n\t"
8960             "TEST   AX,0x400\n\t"
8961             "JZ,s   flags\n\t"
8962             "MOV    AH,1\t# unordered treat as LT\n"
8963     "flags:\tSAHF" %}
8964   opcode(0xD8, 0x3); /* D8 D8+i or D8 /3 */
8965   ins_encode( Push_Reg_DPR(src1),
8966               OpcP, RegOpc(src2),
8967               fpu_flags);
8968   ins_pipe( pipe_slow );
8969 %}
8970 
8971 // Compare vs zero into -1,0,1
8972 instruct cmpDPR_0(rRegI dst, regDPR src1, immDPR0 zero, eAXRegI rax, eFlagsReg cr) %{
8973   predicate(UseSSE<=1);
8974   match(Set dst (CmpD3 src1 zero));
8975   effect(KILL cr, KILL rax);
8976   ins_cost(280);
8977   format %{ "FTSTD  $dst,$src1" %}
8978   opcode(0xE4, 0xD9);
8979   ins_encode( Push_Reg_DPR(src1),
8980               OpcS, OpcP, PopFPU,
8981               CmpF_Result(dst));
8982   ins_pipe( pipe_slow );
8983 %}
8984 
8985 // Compare into -1,0,1
8986 instruct cmpDPR_reg(rRegI dst, regDPR src1, regDPR src2, eAXRegI rax, eFlagsReg cr) %{
8987   predicate(UseSSE<=1);
8988   match(Set dst (CmpD3 src1 src2));
8989   effect(KILL cr, KILL rax);
8990   ins_cost(300);
8991   format %{ "FCMPD  $dst,$src1,$src2" %}
8992   opcode(0xD8, 0x3); /* D8 D8+i or D8 /3 */
8993   ins_encode( Push_Reg_DPR(src1),
8994               OpcP, RegOpc(src2),
8995               CmpF_Result(dst));
8996   ins_pipe( pipe_slow );
8997 %}
8998 
8999 // float compare and set condition codes in EFLAGS by XMM regs
9000 instruct cmpD_cc(eFlagsRegU cr, regD src1, regD src2) %{
9001   predicate(UseSSE>=2);
9002   match(Set cr (CmpD src1 src2));
9003   ins_cost(145);
9004   format %{ "UCOMISD $src1,$src2\n\t"
9005             "JNP,s   exit\n\t"
9006             "PUSHF\t# saw NaN, set CF\n\t"
9007             "AND     [rsp], #0xffffff2b\n\t"
9008             "POPF\n"
9009     "exit:" %}
9010   ins_encode %{
9011     __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
9012     emit_cmpfp_fixup(_masm);
9013   %}
9014   ins_pipe( pipe_slow );
9015 %}
9016 
9017 instruct cmpD_ccCF(eFlagsRegUCF cr, regD src1, regD src2) %{
9018   predicate(UseSSE>=2);
9019   match(Set cr (CmpD src1 src2));
9020   ins_cost(100);
9021   format %{ "UCOMISD $src1,$src2" %}
9022   ins_encode %{
9023     __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
9024   %}
9025   ins_pipe( pipe_slow );
9026 %}
9027 
9028 // float compare and set condition codes in EFLAGS by XMM regs
9029 instruct cmpD_ccmem(eFlagsRegU cr, regD src1, memory src2) %{
9030   predicate(UseSSE>=2);
9031   match(Set cr (CmpD src1 (LoadD src2)));
9032   ins_cost(145);
9033   format %{ "UCOMISD $src1,$src2\n\t"
9034             "JNP,s   exit\n\t"
9035             "PUSHF\t# saw NaN, set CF\n\t"
9036             "AND     [rsp], #0xffffff2b\n\t"
9037             "POPF\n"
9038     "exit:" %}
9039   ins_encode %{
9040     __ ucomisd($src1$$XMMRegister, $src2$$Address);
9041     emit_cmpfp_fixup(_masm);
9042   %}
9043   ins_pipe( pipe_slow );
9044 %}
9045 
9046 instruct cmpD_ccmemCF(eFlagsRegUCF cr, regD src1, memory src2) %{
9047   predicate(UseSSE>=2);
9048   match(Set cr (CmpD src1 (LoadD src2)));
9049   ins_cost(100);
9050   format %{ "UCOMISD $src1,$src2" %}
9051   ins_encode %{
9052     __ ucomisd($src1$$XMMRegister, $src2$$Address);
9053   %}
9054   ins_pipe( pipe_slow );
9055 %}
9056 
9057 // Compare into -1,0,1 in XMM
9058 instruct cmpD_reg(xRegI dst, regD src1, regD src2, eFlagsReg cr) %{
9059   predicate(UseSSE>=2);
9060   match(Set dst (CmpD3 src1 src2));
9061   effect(KILL cr);
9062   ins_cost(255);
9063   format %{ "UCOMISD $src1, $src2\n\t"
9064             "MOV     $dst, #-1\n\t"
9065             "JP,s    done\n\t"
9066             "JB,s    done\n\t"
9067             "SETNE   $dst\n\t"
9068             "MOVZB   $dst, $dst\n"
9069     "done:" %}
9070   ins_encode %{
9071     __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
9072     emit_cmpfp3(_masm, $dst$$Register);
9073   %}
9074   ins_pipe( pipe_slow );
9075 %}
9076 
9077 // Compare into -1,0,1 in XMM and memory
9078 instruct cmpD_regmem(xRegI dst, regD src1, memory src2, eFlagsReg cr) %{
9079   predicate(UseSSE>=2);
9080   match(Set dst (CmpD3 src1 (LoadD src2)));
9081   effect(KILL cr);
9082   ins_cost(275);
9083   format %{ "UCOMISD $src1, $src2\n\t"
9084             "MOV     $dst, #-1\n\t"
9085             "JP,s    done\n\t"
9086             "JB,s    done\n\t"
9087             "SETNE   $dst\n\t"
9088             "MOVZB   $dst, $dst\n"
9089     "done:" %}
9090   ins_encode %{
9091     __ ucomisd($src1$$XMMRegister, $src2$$Address);
9092     emit_cmpfp3(_masm, $dst$$Register);
9093   %}
9094   ins_pipe( pipe_slow );
9095 %}
9096 
9097 
9098 instruct subDPR_reg(regDPR dst, regDPR src) %{
9099   predicate (UseSSE <=1);
9100   match(Set dst (SubD dst src));
9101 
9102   format %{ "FLD    $src\n\t"
9103             "DSUBp  $dst,ST" %}
9104   opcode(0xDE, 0x5); /* DE E8+i  or DE /5 */
9105   ins_cost(150);
9106   ins_encode( Push_Reg_DPR(src),
9107               OpcP, RegOpc(dst) );
9108   ins_pipe( fpu_reg_reg );
9109 %}
9110 
9111 instruct subDPR_reg_round(stackSlotD dst, regDPR src1, regDPR src2) %{
9112   predicate (UseSSE <=1);
9113   match(Set dst (RoundDouble (SubD src1 src2)));
9114   ins_cost(250);
9115 
9116   format %{ "FLD    $src2\n\t"
9117             "DSUB   ST,$src1\n\t"
9118             "FSTP_D $dst\t# D-round" %}
9119   opcode(0xD8, 0x5);
9120   ins_encode( Push_Reg_DPR(src2),
9121               OpcP, RegOpc(src1), Pop_Mem_DPR(dst) );
9122   ins_pipe( fpu_mem_reg_reg );
9123 %}
9124 
9125 
9126 instruct subDPR_reg_mem(regDPR dst, memory src) %{
9127   predicate (UseSSE <=1);
9128   match(Set dst (SubD dst (LoadD src)));
9129   ins_cost(150);
9130 
9131   format %{ "FLD    $src\n\t"
9132             "DSUBp  $dst,ST" %}
9133   opcode(0xDE, 0x5, 0xDD); /* DE E8+i or DE /5 */  /* LoadD  DD /0 */
9134   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
9135               OpcP, RegOpc(dst) );
9136   ins_pipe( fpu_reg_mem );
9137 %}
9138 
9139 instruct absDPR_reg(regDPR1 dst, regDPR1 src) %{
9140   predicate (UseSSE<=1);
9141   match(Set dst (AbsD src));
9142   ins_cost(100);
9143   format %{ "FABS" %}
9144   opcode(0xE1, 0xD9);
9145   ins_encode( OpcS, OpcP );
9146   ins_pipe( fpu_reg_reg );
9147 %}
9148 
9149 instruct negDPR_reg(regDPR1 dst, regDPR1 src) %{
9150   predicate(UseSSE<=1);
9151   match(Set dst (NegD src));
9152   ins_cost(100);
9153   format %{ "FCHS" %}
9154   opcode(0xE0, 0xD9);
9155   ins_encode( OpcS, OpcP );
9156   ins_pipe( fpu_reg_reg );
9157 %}
9158 
9159 instruct addDPR_reg(regDPR dst, regDPR src) %{
9160   predicate(UseSSE<=1);
9161   match(Set dst (AddD dst src));
9162   format %{ "FLD    $src\n\t"
9163             "DADD   $dst,ST" %}
9164   size(4);
9165   ins_cost(150);
9166   opcode(0xDE, 0x0); /* DE C0+i or DE /0*/
9167   ins_encode( Push_Reg_DPR(src),
9168               OpcP, RegOpc(dst) );
9169   ins_pipe( fpu_reg_reg );
9170 %}
9171 
9172 
9173 instruct addDPR_reg_round(stackSlotD dst, regDPR src1, regDPR src2) %{
9174   predicate(UseSSE<=1);
9175   match(Set dst (RoundDouble (AddD src1 src2)));
9176   ins_cost(250);
9177 
9178   format %{ "FLD    $src2\n\t"
9179             "DADD   ST,$src1\n\t"
9180             "FSTP_D $dst\t# D-round" %}
9181   opcode(0xD8, 0x0); /* D8 C0+i or D8 /0*/
9182   ins_encode( Push_Reg_DPR(src2),
9183               OpcP, RegOpc(src1), Pop_Mem_DPR(dst) );
9184   ins_pipe( fpu_mem_reg_reg );
9185 %}
9186 
9187 
9188 instruct addDPR_reg_mem(regDPR dst, memory src) %{
9189   predicate(UseSSE<=1);
9190   match(Set dst (AddD dst (LoadD src)));
9191   ins_cost(150);
9192 
9193   format %{ "FLD    $src\n\t"
9194             "DADDp  $dst,ST" %}
9195   opcode(0xDE, 0x0, 0xDD); /* DE C0+i */  /* LoadD  DD /0 */
9196   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
9197               OpcP, RegOpc(dst) );
9198   ins_pipe( fpu_reg_mem );
9199 %}
9200 
9201 // add-to-memory
9202 instruct addDPR_mem_reg(memory dst, regDPR src) %{
9203   predicate(UseSSE<=1);
9204   match(Set dst (StoreD dst (RoundDouble (AddD (LoadD dst) src))));
9205   ins_cost(150);
9206 
9207   format %{ "FLD_D  $dst\n\t"
9208             "DADD   ST,$src\n\t"
9209             "FST_D  $dst" %}
9210   opcode(0xDD, 0x0);
9211   ins_encode( Opcode(0xDD), RMopc_Mem(0x00,dst),
9212               Opcode(0xD8), RegOpc(src),
9213               set_instruction_start,
9214               Opcode(0xDD), RMopc_Mem(0x03,dst) );
9215   ins_pipe( fpu_reg_mem );
9216 %}
9217 
9218 instruct addDPR_reg_imm1(regDPR dst, immDPR1 con) %{
9219   predicate(UseSSE<=1);
9220   match(Set dst (AddD dst con));
9221   ins_cost(125);
9222   format %{ "FLD1\n\t"
9223             "DADDp  $dst,ST" %}
9224   ins_encode %{
9225     __ fld1();
9226     __ faddp($dst$$reg);
9227   %}
9228   ins_pipe(fpu_reg);
9229 %}
9230 
9231 instruct addDPR_reg_imm(regDPR dst, immDPR con) %{
9232   predicate(UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 );
9233   match(Set dst (AddD dst con));
9234   ins_cost(200);
9235   format %{ "FLD_D  [$constantaddress]\t# load from constant table: double=$con\n\t"
9236             "DADDp  $dst,ST" %}
9237   ins_encode %{
9238     __ fld_d($constantaddress($con));
9239     __ faddp($dst$$reg);
9240   %}
9241   ins_pipe(fpu_reg_mem);
9242 %}
9243 
9244 instruct addDPR_reg_imm_round(stackSlotD dst, regDPR src, immDPR con) %{
9245   predicate(UseSSE<=1 && _kids[0]->_kids[1]->_leaf->getd() != 0.0 && _kids[0]->_kids[1]->_leaf->getd() != 1.0 );
9246   match(Set dst (RoundDouble (AddD src con)));
9247   ins_cost(200);
9248   format %{ "FLD_D  [$constantaddress]\t# load from constant table: double=$con\n\t"
9249             "DADD   ST,$src\n\t"
9250             "FSTP_D $dst\t# D-round" %}
9251   ins_encode %{
9252     __ fld_d($constantaddress($con));
9253     __ fadd($src$$reg);
9254     __ fstp_d(Address(rsp, $dst$$disp));
9255   %}
9256   ins_pipe(fpu_mem_reg_con);
9257 %}
9258 
9259 instruct mulDPR_reg(regDPR dst, regDPR src) %{
9260   predicate(UseSSE<=1);
9261   match(Set dst (MulD dst src));
9262   format %{ "FLD    $src\n\t"
9263             "DMULp  $dst,ST" %}
9264   opcode(0xDE, 0x1); /* DE C8+i or DE /1*/
9265   ins_cost(150);
9266   ins_encode( Push_Reg_DPR(src),
9267               OpcP, RegOpc(dst) );
9268   ins_pipe( fpu_reg_reg );
9269 %}
9270 
9271 // Strict FP instruction biases argument before multiply then
9272 // biases result to avoid double rounding of subnormals.
9273 //
9274 // scale arg1 by multiplying arg1 by 2^(-15360)
9275 // load arg2
9276 // multiply scaled arg1 by arg2
9277 // rescale product by 2^(15360)
9278 //
9279 instruct strictfp_mulDPR_reg(regDPR1 dst, regnotDPR1 src) %{
9280   predicate( UseSSE<=1 && Compile::current()->has_method() && Compile::current()->method()->is_strict() );
9281   match(Set dst (MulD dst src));
9282   ins_cost(1);   // Select this instruction for all strict FP double multiplies
9283 
9284   format %{ "FLD    StubRoutines::_fpu_subnormal_bias1\n\t"
9285             "DMULp  $dst,ST\n\t"
9286             "FLD    $src\n\t"
9287             "DMULp  $dst,ST\n\t"
9288             "FLD    StubRoutines::_fpu_subnormal_bias2\n\t"
9289             "DMULp  $dst,ST\n\t" %}
9290   opcode(0xDE, 0x1); /* DE C8+i or DE /1*/
9291   ins_encode( strictfp_bias1(dst),
9292               Push_Reg_DPR(src),
9293               OpcP, RegOpc(dst),
9294               strictfp_bias2(dst) );
9295   ins_pipe( fpu_reg_reg );
9296 %}
9297 
9298 instruct mulDPR_reg_imm(regDPR dst, immDPR con) %{
9299   predicate( UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 );
9300   match(Set dst (MulD dst con));
9301   ins_cost(200);
9302   format %{ "FLD_D  [$constantaddress]\t# load from constant table: double=$con\n\t"
9303             "DMULp  $dst,ST" %}
9304   ins_encode %{
9305     __ fld_d($constantaddress($con));
9306     __ fmulp($dst$$reg);
9307   %}
9308   ins_pipe(fpu_reg_mem);
9309 %}
9310 
9311 
9312 instruct mulDPR_reg_mem(regDPR dst, memory src) %{
9313   predicate( UseSSE<=1 );
9314   match(Set dst (MulD dst (LoadD src)));
9315   ins_cost(200);
9316   format %{ "FLD_D  $src\n\t"
9317             "DMULp  $dst,ST" %}
9318   opcode(0xDE, 0x1, 0xDD); /* DE C8+i or DE /1*/  /* LoadD  DD /0 */
9319   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
9320               OpcP, RegOpc(dst) );
9321   ins_pipe( fpu_reg_mem );
9322 %}
9323 
9324 //
9325 // Cisc-alternate to reg-reg multiply
9326 instruct mulDPR_reg_mem_cisc(regDPR dst, regDPR src, memory mem) %{
9327   predicate( UseSSE<=1 );
9328   match(Set dst (MulD src (LoadD mem)));
9329   ins_cost(250);
9330   format %{ "FLD_D  $mem\n\t"
9331             "DMUL   ST,$src\n\t"
9332             "FSTP_D $dst" %}
9333   opcode(0xD8, 0x1, 0xD9); /* D8 C8+i */  /* LoadD D9 /0 */
9334   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,mem),
9335               OpcReg_FPR(src),
9336               Pop_Reg_DPR(dst) );
9337   ins_pipe( fpu_reg_reg_mem );
9338 %}
9339 
9340 
9341 // MACRO3 -- addDPR a mulDPR
9342 // This instruction is a '2-address' instruction in that the result goes
9343 // back to src2.  This eliminates a move from the macro; possibly the
9344 // register allocator will have to add it back (and maybe not).
9345 instruct addDPR_mulDPR_reg(regDPR src2, regDPR src1, regDPR src0) %{
9346   predicate( UseSSE<=1 );
9347   match(Set src2 (AddD (MulD src0 src1) src2));
9348   format %{ "FLD    $src0\t# ===MACRO3d===\n\t"
9349             "DMUL   ST,$src1\n\t"
9350             "DADDp  $src2,ST" %}
9351   ins_cost(250);
9352   opcode(0xDD); /* LoadD DD /0 */
9353   ins_encode( Push_Reg_FPR(src0),
9354               FMul_ST_reg(src1),
9355               FAddP_reg_ST(src2) );
9356   ins_pipe( fpu_reg_reg_reg );
9357 %}
9358 
9359 
9360 // MACRO3 -- subDPR a mulDPR
9361 instruct subDPR_mulDPR_reg(regDPR src2, regDPR src1, regDPR src0) %{
9362   predicate( UseSSE<=1 );
9363   match(Set src2 (SubD (MulD src0 src1) src2));
9364   format %{ "FLD    $src0\t# ===MACRO3d===\n\t"
9365             "DMUL   ST,$src1\n\t"
9366             "DSUBRp $src2,ST" %}
9367   ins_cost(250);
9368   ins_encode( Push_Reg_FPR(src0),
9369               FMul_ST_reg(src1),
9370               Opcode(0xDE), Opc_plus(0xE0,src2));
9371   ins_pipe( fpu_reg_reg_reg );
9372 %}
9373 
9374 
9375 instruct divDPR_reg(regDPR dst, regDPR src) %{
9376   predicate( UseSSE<=1 );
9377   match(Set dst (DivD dst src));
9378 
9379   format %{ "FLD    $src\n\t"
9380             "FDIVp  $dst,ST" %}
9381   opcode(0xDE, 0x7); /* DE F8+i or DE /7*/
9382   ins_cost(150);
9383   ins_encode( Push_Reg_DPR(src),
9384               OpcP, RegOpc(dst) );
9385   ins_pipe( fpu_reg_reg );
9386 %}
9387 
9388 // Strict FP instruction biases argument before division then
9389 // biases result, to avoid double rounding of subnormals.
9390 //
9391 // scale dividend by multiplying dividend by 2^(-15360)
9392 // load divisor
9393 // divide scaled dividend by divisor
9394 // rescale quotient by 2^(15360)
9395 //
9396 instruct strictfp_divDPR_reg(regDPR1 dst, regnotDPR1 src) %{
9397   predicate( UseSSE<=1 && Compile::current()->has_method() && Compile::current()->method()->is_strict() );
9398   match(Set dst (DivD dst src));
9399   ins_cost(1);   // Select this instruction for all strict FP double divides
9401 
9402   format %{ "FLD    StubRoutines::_fpu_subnormal_bias1\n\t"
9403             "DMULp  $dst,ST\n\t"
9404             "FLD    $src\n\t"
9405             "FDIVp  $dst,ST\n\t"
9406             "FLD    StubRoutines::_fpu_subnormal_bias2\n\t"
9407             "DMULp  $dst,ST\n\t" %}
9408   opcode(0xDE, 0x7); /* DE F8+i or DE /7*/
9409   ins_encode( strictfp_bias1(dst),
9410               Push_Reg_DPR(src),
9411               OpcP, RegOpc(dst),
9412               strictfp_bias2(dst) );
9413   ins_pipe( fpu_reg_reg );
9414 %}
9415 
9416 instruct divDPR_reg_round(stackSlotD dst, regDPR src1, regDPR src2) %{
9417   predicate( UseSSE<=1 && !(Compile::current()->has_method() && Compile::current()->method()->is_strict()) );
9418   match(Set dst (RoundDouble (DivD src1 src2)));
9419 
9420   format %{ "FLD    $src1\n\t"
9421             "FDIV   ST,$src2\n\t"
9422             "FSTP_D $dst\t# D-round" %}
9423   opcode(0xD8, 0x6); /* D8 F0+i or D8 /6 */
9424   ins_encode( Push_Reg_DPR(src1),
9425               OpcP, RegOpc(src2), Pop_Mem_DPR(dst) );
9426   ins_pipe( fpu_mem_reg_reg );
9427 %}
9428 
9429 
9430 instruct modDPR_reg(regDPR dst, regDPR src, eAXRegI rax, eFlagsReg cr) %{
9431   predicate(UseSSE<=1);
9432   match(Set dst (ModD dst src));
9433   effect(KILL rax, KILL cr); // emitModDPR() uses EAX and EFLAGS
9434 
9435   format %{ "DMOD   $dst,$src" %}
9436   ins_cost(250);
9437   ins_encode(Push_Reg_Mod_DPR(dst, src),
9438               emitModDPR(),
9439               Push_Result_Mod_DPR(src),
9440               Pop_Reg_DPR(dst));
9441   ins_pipe( pipe_slow );
9442 %}
9443 
9444 instruct modD_reg(regD dst, regD src0, regD src1, eAXRegI rax, eFlagsReg cr) %{
9445   predicate(UseSSE>=2);
9446   match(Set dst (ModD src0 src1));
9447   effect(KILL rax, KILL cr);
9448 
9449   format %{ "SUB    ESP,8\t # DMOD\n"
9450           "\tMOVSD  [ESP+0],$src1\n"
9451           "\tFLD_D  [ESP+0]\n"
9452           "\tMOVSD  [ESP+0],$src0\n"
9453           "\tFLD_D  [ESP+0]\n"
9454      "loop:\tFPREM\n"
9455           "\tFWAIT\n"
9456           "\tFNSTSW AX\n"
9457           "\tSAHF\n"
9458           "\tJP     loop\n"
9459           "\tFSTP_D [ESP+0]\n"
9460           "\tMOVSD  $dst,[ESP+0]\n"
9461           "\tADD    ESP,8\n"
9462           "\tFSTP   ST0\t # Restore FPU Stack"
9463     %}
9464   ins_cost(250);
9465   ins_encode( Push_ModD_encoding(src0, src1), emitModDPR(), Push_ResultD(dst), PopFPU);
9466   ins_pipe( pipe_slow );
9467 %}
9468 
9469 instruct sinDPR_reg(regDPR1 dst, regDPR1 src) %{
9470   predicate (UseSSE<=1);
9471   match(Set dst (SinD src));
9472   ins_cost(1800);
9473   format %{ "DSIN   $dst" %}
9474   opcode(0xD9, 0xFE);
9475   ins_encode( OpcP, OpcS );
9476   ins_pipe( pipe_slow );
9477 %}
9478 
9479 instruct sinD_reg(regD dst, eFlagsReg cr) %{
9480   predicate (UseSSE>=2);
9481   match(Set dst (SinD dst));
9482   effect(KILL cr); // Push_{Src|Result}D() uses "{SUB|ADD} ESP,8"
9483   ins_cost(1800);
9484   format %{ "DSIN   $dst" %}
9485   opcode(0xD9, 0xFE);
9486   ins_encode( Push_SrcD(dst), OpcP, OpcS, Push_ResultD(dst) );
9487   ins_pipe( pipe_slow );
9488 %}
9489 
9490 instruct cosDPR_reg(regDPR1 dst, regDPR1 src) %{
9491   predicate (UseSSE<=1);
9492   match(Set dst (CosD src));
9493   ins_cost(1800);
9494   format %{ "DCOS   $dst" %}
9495   opcode(0xD9, 0xFF);
9496   ins_encode( OpcP, OpcS );
9497   ins_pipe( pipe_slow );
9498 %}
9499 
9500 instruct cosD_reg(regD dst, eFlagsReg cr) %{
9501   predicate (UseSSE>=2);
9502   match(Set dst (CosD dst));
9503   effect(KILL cr); // Push_{Src|Result}D() uses "{SUB|ADD} ESP,8"
9504   ins_cost(1800);
9505   format %{ "DCOS   $dst" %}
9506   opcode(0xD9, 0xFF);
9507   ins_encode( Push_SrcD(dst), OpcP, OpcS, Push_ResultD(dst) );
9508   ins_pipe( pipe_slow );
9509 %}
9510 
9511 instruct tanDPR_reg(regDPR1 dst, regDPR1 src) %{
9512   predicate (UseSSE<=1);
9513   match(Set dst(TanD src));
9514   format %{ "DTAN   $dst" %}
9515   ins_encode( Opcode(0xD9), Opcode(0xF2),    // fptan
9516               Opcode(0xDD), Opcode(0xD8));   // fstp st
9517   ins_pipe( pipe_slow );
9518 %}
9519 
9520 instruct tanD_reg(regD dst, eFlagsReg cr) %{
9521   predicate (UseSSE>=2);
9522   match(Set dst(TanD dst));
9523   effect(KILL cr); // Push_{Src|Result}D() uses "{SUB|ADD} ESP,8"
9524   format %{ "DTAN   $dst" %}
9525   ins_encode( Push_SrcD(dst),
9526               Opcode(0xD9), Opcode(0xF2),    // fptan
9527               Opcode(0xDD), Opcode(0xD8),   // fstp st
9528               Push_ResultD(dst) );
9529   ins_pipe( pipe_slow );
9530 %}
9531 
9532 instruct atanDPR_reg(regDPR dst, regDPR src) %{
9533   predicate (UseSSE<=1);
9534   match(Set dst(AtanD dst src));
9535   format %{ "DATA   $dst,$src" %}
9536   opcode(0xD9, 0xF3);
9537   ins_encode( Push_Reg_DPR(src),
9538               OpcP, OpcS, RegOpc(dst) );
9539   ins_pipe( pipe_slow );
9540 %}
9541 
9542 instruct atanD_reg(regD dst, regD src, eFlagsReg cr) %{
9543   predicate (UseSSE>=2);
9544   match(Set dst(AtanD dst src));
9545   effect(KILL cr); // Push_{Src|Result}D() uses "{SUB|ADD} ESP,8"
9546   format %{ "DATA   $dst,$src" %}
9547   opcode(0xD9, 0xF3);
9548   ins_encode( Push_SrcD(src),
9549               OpcP, OpcS, Push_ResultD(dst) );
9550   ins_pipe( pipe_slow );
9551 %}
9552 
9553 instruct sqrtDPR_reg(regDPR dst, regDPR src) %{
9554   predicate (UseSSE<=1);
9555   match(Set dst (SqrtD src));
9556   format %{ "DSQRT  $dst,$src" %}
9557   opcode(0xFA, 0xD9);
9558   ins_encode( Push_Reg_DPR(src),
9559               OpcS, OpcP, Pop_Reg_DPR(dst) );
9560   ins_pipe( pipe_slow );
9561 %}
9562 
9563 instruct powDPR_reg(regDPR X, regDPR1 Y, eAXRegI rax, eDXRegI rdx, eCXRegI rcx, eFlagsReg cr) %{
9564   predicate (UseSSE<=1);
9565   match(Set Y (PowD X Y));  // Raise X to the Yth power
9566   effect(KILL rax, KILL rdx, KILL rcx, KILL cr);
9567   format %{ "fast_pow $X $Y -> $Y  // KILL $rax, $rcx, $rdx" %}
9568   ins_encode %{
9569     __ subptr(rsp, 8);
9570     __ fld_s($X$$reg - 1);
9571     __ fast_pow();
9572     __ addptr(rsp, 8);
9573   %}
9574   ins_pipe( pipe_slow );
9575 %}
9576 
9577 instruct powD_reg(regD dst, regD src0, regD src1, eAXRegI rax, eDXRegI rdx, eCXRegI rcx, eFlagsReg cr) %{
9578   predicate (UseSSE>=2);
9579   match(Set dst (PowD src0 src1));  // Raise src0 to the src1'th power
9580   effect(KILL rax, KILL rdx, KILL rcx, KILL cr);
9581   format %{ "fast_pow $src0 $src1 -> $dst  // KILL $rax, $rcx, $rdx" %}
9582   ins_encode %{
9583     __ subptr(rsp, 8);
9584     __ movdbl(Address(rsp, 0), $src1$$XMMRegister);
9585     __ fld_d(Address(rsp, 0));
9586     __ movdbl(Address(rsp, 0), $src0$$XMMRegister);
9587     __ fld_d(Address(rsp, 0));
9588     __ fast_pow();
9589     __ fstp_d(Address(rsp, 0));
9590     __ movdbl($dst$$XMMRegister, Address(rsp, 0));
9591     __ addptr(rsp, 8);
9592   %}
9593   ins_pipe( pipe_slow );
9594 %}
9595 
9596 
9597 instruct expDPR_reg(regDPR1 dpr1, eAXRegI rax, eDXRegI rdx, eCXRegI rcx, eFlagsReg cr) %{
9598   predicate (UseSSE<=1);
9599   match(Set dpr1 (ExpD dpr1));
9600   effect(KILL rax, KILL rcx, KILL rdx, KILL cr);
9601   format %{ "fast_exp $dpr1 -> $dpr1  // KILL $rax, $rcx, $rdx" %}
9602   ins_encode %{
9603     __ fast_exp();
9604   %}
9605   ins_pipe( pipe_slow );
9606 %}
9607 
9608 instruct expD_reg(regD dst, regD src, eAXRegI rax, eDXRegI rdx, eCXRegI rcx, eFlagsReg cr) %{
9609   predicate (UseSSE>=2);
9610   match(Set dst (ExpD src));
9611   effect(KILL rax, KILL rcx, KILL rdx, KILL cr);
9612   format %{ "fast_exp $dst -> $src  // KILL $rax, $rcx, $rdx" %}
9613   ins_encode %{
9614     __ subptr(rsp, 8);
9615     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
9616     __ fld_d(Address(rsp, 0));
9617     __ fast_exp();
9618     __ fstp_d(Address(rsp, 0));
9619     __ movdbl($dst$$XMMRegister, Address(rsp, 0));
9620     __ addptr(rsp, 8);
9621   %}
9622   ins_pipe( pipe_slow );
9623 %}
9624 
9625 instruct log10DPR_reg(regDPR1 dst, regDPR1 src) %{
9626   predicate (UseSSE<=1);
9627   // The source Double operand on FPU stack
9628   match(Set dst (Log10D src));
9629   // fldlg2       ; push log_10(2) on the FPU stack; full 80-bit number
9630   // fxch         ; swap ST(0) with ST(1)
9631   // fyl2x        ; compute log_10(2) * log_2(x)
9632   format %{ "FLDLG2 \t\t\t#Log10\n\t"
9633             "FXCH   \n\t"
9634             "FYL2X  \t\t\t# Q=Log10*Log_2(x)"
9635          %}
9636   ins_encode( Opcode(0xD9), Opcode(0xEC),   // fldlg2
9637               Opcode(0xD9), Opcode(0xC9),   // fxch
9638               Opcode(0xD9), Opcode(0xF1));  // fyl2x
9639 
9640   ins_pipe( pipe_slow );
9641 %}
9642 
9643 instruct log10D_reg(regD dst, regD src, eFlagsReg cr) %{
9644   predicate (UseSSE>=2);
9645   effect(KILL cr);
9646   match(Set dst (Log10D src));
9647   // fldlg2       ; push log_10(2) on the FPU stack; full 80-bit number
9648   // fyl2x        ; compute log_10(2) * log_2(x)
9649   format %{ "FLDLG2 \t\t\t#Log10\n\t"
9650             "FYL2X  \t\t\t# Q=Log10*Log_2(x)"
9651          %}
9652   ins_encode( Opcode(0xD9), Opcode(0xEC),   // fldlg2
9653               Push_SrcD(src),
9654               Opcode(0xD9), Opcode(0xF1),   // fyl2x
9655               Push_ResultD(dst));
9656 
9657   ins_pipe( pipe_slow );
9658 %}
9659 
9660 instruct logDPR_reg(regDPR1 dst, regDPR1 src) %{
9661   predicate (UseSSE<=1);
9662   // The source Double operand on FPU stack
9663   match(Set dst (LogD src));
9664   // fldln2       ; push log_e(2) on the FPU stack; full 80-bit number
9665   // fxch         ; swap ST(0) with ST(1)
9666   // fyl2x        ; compute log_e(2) * log_2(x)
9667   format %{ "FLDLN2 \t\t\t#Log_e\n\t"
9668             "FXCH   \n\t"
9669             "FYL2X  \t\t\t# Q=Log_e*Log_2(x)"
9670          %}
9671   ins_encode( Opcode(0xD9), Opcode(0xED),   // fldln2
9672               Opcode(0xD9), Opcode(0xC9),   // fxch
9673               Opcode(0xD9), Opcode(0xF1));  // fyl2x
9674 
9675   ins_pipe( pipe_slow );
9676 %}
9677 
9678 instruct logD_reg(regD dst, regD src, eFlagsReg cr) %{
9679   predicate (UseSSE>=2);
9680   effect(KILL cr);
9681   // The source and result Double operands in XMM registers
9682   match(Set dst (LogD src));
9683   // fldln2       ; push log_e(2) on the FPU stack; full 80-bit number
9684   // fyl2x        ; compute log_e(2) * log_2(x)
9685   format %{ "FLDLN2 \t\t\t#Log_e\n\t"
9686             "FYL2X  \t\t\t# Q=Log_e*Log_2(x)"
9687          %}
9688   ins_encode( Opcode(0xD9), Opcode(0xED),   // fldln2
9689               Push_SrcD(src),
9690               Opcode(0xD9), Opcode(0xF1),   // fyl2x
9691               Push_ResultD(dst));
9692   ins_pipe( pipe_slow );
9693 %}
9694 
9695 //-------------Float Instructions-------------------------------
9696 // Float Math
9697 
9698 // Code for float compare:
9699 //     fcompp();
9700 //     fwait(); fnstsw_ax();
9701 //     sahf();
9702 //     movl(dst, unordered_result);
9703 //     jcc(Assembler::parity, exit);
9704 //     movl(dst, less_result);
9705 //     jcc(Assembler::below, exit);
9706 //     movl(dst, equal_result);
9707 //     jcc(Assembler::equal, exit);
9708 //     movl(dst, greater_result);
9709 //   exit:
9710 
9711 // P6 version of float compare, sets condition codes in EFLAGS
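// FUCOMIP reports unordered operands as ZF=PF=CF=1; the JNP/SAHF fixup below
// rewrites that state to CF=1, ZF=0 so a NaN compares as less-than.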
9712 instruct cmpFPR_cc_P6(eFlagsRegU cr, regFPR src1, regFPR src2, eAXRegI rax) %{
9713   predicate(VM_Version::supports_cmov() && UseSSE == 0);
9714   match(Set cr (CmpF src1 src2));
9715   effect(KILL rax);
9716   ins_cost(150);
9717   format %{ "FLD    $src1\n\t"
9718             "FUCOMIP ST,$src2  // P6 instruction\n\t"
9719             "JNP    exit\n\t"
9720             "MOV    ah,1       // saw a NaN, set CF (treat as LT)\n\t"
9721             "SAHF\n"
9722      "exit:\tNOP               // avoid branch to branch" %}
9723   opcode(0xDF, 0x05); /* DF E8+i or DF /5 */
9724   ins_encode( Push_Reg_DPR(src1),
9725               OpcP, RegOpc(src2),
9726               cmpF_P6_fixup );
9727   ins_pipe( pipe_slow );
9728 %}
9729 
9730 instruct cmpFPR_cc_P6CF(eFlagsRegUCF cr, regFPR src1, regFPR src2) %{
9731   predicate(VM_Version::supports_cmov() && UseSSE == 0);
9732   match(Set cr (CmpF src1 src2));
9733   ins_cost(100);
9734   format %{ "FLD    $src1\n\t"
9735             "FUCOMIP ST,$src2  // P6 instruction" %}
9736   opcode(0xDF, 0x05); /* DF E8+i or DF /5 */
9737   ins_encode( Push_Reg_DPR(src1),
9738               OpcP, RegOpc(src2));
9739   ins_pipe( pipe_slow );
9740 %}
9741 
9742 
9743 // Compare & branch
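// Non-P6 path: FNSTSW AX copies the FPU status word into AX, TEST AX,0x400
// checks C2 (set for an unordered compare), and SAHF maps C0/C2/C3 into
// CF/PF/ZF so the result can be branched on with the integer flags.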
9744 instruct cmpFPR_cc(eFlagsRegU cr, regFPR src1, regFPR src2, eAXRegI rax) %{
9745   predicate(UseSSE == 0);
9746   match(Set cr (CmpF src1 src2));
9747   effect(KILL rax);
9748   ins_cost(200);
9749   format %{ "FLD    $src1\n\t"
9750             "FCOMp  $src2\n\t"
9751             "FNSTSW AX\n\t"
9752             "TEST   AX,0x400\n\t"
9753             "JZ,s   flags\n\t"
9754             "MOV    AH,1\t# unordered treat as LT\n"
9755     "flags:\tSAHF" %}
9756   opcode(0xD8, 0x3); /* D8 D8+i or D8 /3 */
9757   ins_encode( Push_Reg_DPR(src1),
9758               OpcP, RegOpc(src2),
9759               fpu_flags);
9760   ins_pipe( pipe_slow );
9761 %}
9762 
9763 // Compare vs zero into -1,0,1
9764 instruct cmpFPR_0(rRegI dst, regFPR src1, immFPR0 zero, eAXRegI rax, eFlagsReg cr) %{
9765   predicate(UseSSE == 0);
9766   match(Set dst (CmpF3 src1 zero));
9767   effect(KILL cr, KILL rax);
9768   ins_cost(280);
9769   format %{ "FTSTF  $dst,$src1" %}
9770   opcode(0xE4, 0xD9);
9771   ins_encode( Push_Reg_DPR(src1),
9772               OpcS, OpcP, PopFPU,
9773               CmpF_Result(dst));
9774   ins_pipe( pipe_slow );
9775 %}
9776 
9777 // Compare into -1,0,1
9778 instruct cmpFPR_reg(rRegI dst, regFPR src1, regFPR src2, eAXRegI rax, eFlagsReg cr) %{
9779   predicate(UseSSE == 0);
9780   match(Set dst (CmpF3 src1 src2));
9781   effect(KILL cr, KILL rax);
9782   ins_cost(300);
9783   format %{ "FCMPF  $dst,$src1,$src2" %}
9784   opcode(0xD8, 0x3); /* D8 D8+i or D8 /3 */
9785   ins_encode( Push_Reg_DPR(src1),
9786               OpcP, RegOpc(src2),
9787               CmpF_Result(dst));
9788   ins_pipe( pipe_slow );
9789 %}
9790 
9791 // float compare and set condition codes in EFLAGS by XMM regs
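// UCOMISS reports unordered operands as ZF=PF=CF=1; the PUSHF/AND/POPF fixup
// masks EFLAGS with 0xffffff2b, clearing ZF and PF but keeping CF, so a NaN
// again reads as less-than.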
9792 instruct cmpF_cc(eFlagsRegU cr, regF src1, regF src2) %{
9793   predicate(UseSSE>=1);
9794   match(Set cr (CmpF src1 src2));
9795   ins_cost(145);
9796   format %{ "UCOMISS $src1,$src2\n\t"
9797             "JNP,s   exit\n\t"
9798             "PUSHF\t# saw NaN, set CF\n\t"
9799             "AND     [rsp], #0xffffff2b\n\t"
9800             "POPF\n"
9801     "exit:" %}
9802   ins_encode %{
9803     __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
9804     emit_cmpfp_fixup(_masm);
9805   %}
9806   ins_pipe( pipe_slow );
9807 %}
9808 
9809 instruct cmpF_ccCF(eFlagsRegUCF cr, regF src1, regF src2) %{
9810   predicate(UseSSE>=1);
9811   match(Set cr (CmpF src1 src2));
9812   ins_cost(100);
9813   format %{ "UCOMISS $src1,$src2" %}
9814   ins_encode %{
9815     __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
9816   %}
9817   ins_pipe( pipe_slow );
9818 %}
9819 
9820 // float compare and set condition codes in EFLAGS by XMM regs
9821 instruct cmpF_ccmem(eFlagsRegU cr, regF src1, memory src2) %{
9822   predicate(UseSSE>=1);
9823   match(Set cr (CmpF src1 (LoadF src2)));
9824   ins_cost(165);
9825   format %{ "UCOMISS $src1,$src2\n\t"
9826             "JNP,s   exit\n\t"
9827             "PUSHF\t# saw NaN, set CF\n\t"
9828             "AND     [rsp], #0xffffff2b\n\t"
9829             "POPF\n"
9830     "exit:" %}
9831   ins_encode %{
9832     __ ucomiss($src1$$XMMRegister, $src2$$Address);
9833     emit_cmpfp_fixup(_masm);
9834   %}
9835   ins_pipe( pipe_slow );
9836 %}
9837 
9838 instruct cmpF_ccmemCF(eFlagsRegUCF cr, regF src1, memory src2) %{
9839   predicate(UseSSE>=1);
9840   match(Set cr (CmpF src1 (LoadF src2)));
9841   ins_cost(100);
9842   format %{ "UCOMISS $src1,$src2" %}
9843   ins_encode %{
9844     __ ucomiss($src1$$XMMRegister, $src2$$Address);
9845   %}
9846   ins_pipe( pipe_slow );
9847 %}
9848 
9849 // Compare into -1,0,1 in XMM
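// The fixup after UCOMISS produces -1 when the compare is below or unordered
// (NaN), 0 when equal, and 1 when greater (SETNE/MOVZB).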
9850 instruct cmpF_reg(xRegI dst, regF src1, regF src2, eFlagsReg cr) %{
9851   predicate(UseSSE>=1);
9852   match(Set dst (CmpF3 src1 src2));
9853   effect(KILL cr);
9854   ins_cost(255);
9855   format %{ "UCOMISS $src1, $src2\n\t"
9856             "MOV     $dst, #-1\n\t"
9857             "JP,s    done\n\t"
9858             "JB,s    done\n\t"
9859             "SETNE   $dst\n\t"
9860             "MOVZB   $dst, $dst\n"
9861     "done:" %}
9862   ins_encode %{
9863     __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
9864     emit_cmpfp3(_masm, $dst$$Register);
9865   %}
9866   ins_pipe( pipe_slow );
9867 %}
9868 
9869 // Compare into -1,0,1 in XMM and memory
9870 instruct cmpF_regmem(xRegI dst, regF src1, memory src2, eFlagsReg cr) %{
9871   predicate(UseSSE>=1);
9872   match(Set dst (CmpF3 src1 (LoadF src2)));
9873   effect(KILL cr);
9874   ins_cost(275);
9875   format %{ "UCOMISS $src1, $src2\n\t"
9876             "MOV     $dst, #-1\n\t"
9877             "JP,s    done\n\t"
9878             "JB,s    done\n\t"
9879             "SETNE   $dst\n\t"
9880             "MOVZB   $dst, $dst\n"
9881     "done:" %}
9882   ins_encode %{
9883     __ ucomiss($src1$$XMMRegister, $src2$$Address);
9884     emit_cmpfp3(_masm, $dst$$Register);
9885   %}
9886   ins_pipe( pipe_slow );
9887 %}
9888 
9889 // Spill to obtain 24-bit precision
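// Storing the x87 result through a single-precision stack slot rounds the
// wider internal format down to Java float range; the register-only variants
// below are selected when the 24-bit instruction forms are not in use.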
9890 instruct subFPR24_reg(stackSlotF dst, regFPR src1, regFPR src2) %{
9891   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
9892   match(Set dst (SubF src1 src2));
9893 
9894   format %{ "FSUB   $dst,$src1 - $src2" %}
9895   opcode(0xD8, 0x4); /* D8 E0+i or D8 /4 mod==0x3 ;; result in TOS */
9896   ins_encode( Push_Reg_FPR(src1),
9897               OpcReg_FPR(src2),
9898               Pop_Mem_FPR(dst) );
9899   ins_pipe( fpu_mem_reg_reg );
9900 %}
9901 //
9902 // This instruction does not round to 24-bits
9903 instruct subFPR_reg(regFPR dst, regFPR src) %{
9904   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
9905   match(Set dst (SubF dst src));
9906 
9907   format %{ "FSUB   $dst,$src" %}
9908   opcode(0xDE, 0x5); /* DE E8+i  or DE /5 */
9909   ins_encode( Push_Reg_FPR(src),
9910               OpcP, RegOpc(dst) );
9911   ins_pipe( fpu_reg_reg );
9912 %}
9913 
9914 // Spill to obtain 24-bit precision
9915 instruct addFPR24_reg(stackSlotF dst, regFPR src1, regFPR src2) %{
9916   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
9917   match(Set dst (AddF src1 src2));
9918 
9919   format %{ "FADD   $dst,$src1,$src2" %}
9920   opcode(0xD8, 0x0); /* D8 C0+i */
9921   ins_encode( Push_Reg_FPR(src2),
9922               OpcReg_FPR(src1),
9923               Pop_Mem_FPR(dst) );
9924   ins_pipe( fpu_mem_reg_reg );
9925 %}
9926 //
9927 // This instruction does not round to 24-bits
9928 instruct addFPR_reg(regFPR dst, regFPR src) %{
9929   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
9930   match(Set dst (AddF dst src));
9931 
9932   format %{ "FLD    $src\n\t"
9933             "FADDp  $dst,ST" %}
9934   opcode(0xDE, 0x0); /* DE C0+i or DE /0*/
9935   ins_encode( Push_Reg_FPR(src),
9936               OpcP, RegOpc(dst) );
9937   ins_pipe( fpu_reg_reg );
9938 %}
9939 
9940 instruct absFPR_reg(regFPR1 dst, regFPR1 src) %{
9941   predicate(UseSSE==0);
9942   match(Set dst (AbsF src));
9943   ins_cost(100);
9944   format %{ "FABS" %}
9945   opcode(0xE1, 0xD9);
9946   ins_encode( OpcS, OpcP );
9947   ins_pipe( fpu_reg_reg );
9948 %}
9949 
9950 instruct negFPR_reg(regFPR1 dst, regFPR1 src) %{
9951   predicate(UseSSE==0);
9952   match(Set dst (NegF src));
9953   ins_cost(100);
9954   format %{ "FCHS" %}
9955   opcode(0xE0, 0xD9);
9956   ins_encode( OpcS, OpcP );
9957   ins_pipe( fpu_reg_reg );
9958 %}
9959 
9960 // Cisc-alternate to addFPR_reg
9961 // Spill to obtain 24-bit precision
9962 instruct addFPR24_reg_mem(stackSlotF dst, regFPR src1, memory src2) %{
9963   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
9964   match(Set dst (AddF src1 (LoadF src2)));
9965 
9966   format %{ "FLD    $src2\n\t"
9967             "FADD   ST,$src1\n\t"
9968             "FSTP_S $dst" %}
9969   opcode(0xD8, 0x0, 0xD9); /* D8 C0+i */  /* LoadF  D9 /0 */
9970   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
9971               OpcReg_FPR(src1),
9972               Pop_Mem_FPR(dst) );
9973   ins_pipe( fpu_mem_reg_mem );
9974 %}
9975 //
9976 // Cisc-alternate to addFPR_reg
9977 // This instruction does not round to 24-bits
9978 instruct addFPR_reg_mem(regFPR dst, memory src) %{
9979   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
9980   match(Set dst (AddF dst (LoadF src)));
9981 
9982   format %{ "FADD   $dst,$src" %}
9983   opcode(0xDE, 0x0, 0xD9); /* DE C0+i or DE /0*/  /* LoadF  D9 /0 */
9984   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
9985               OpcP, RegOpc(dst) );
9986   ins_pipe( fpu_reg_mem );
9987 %}
9988 
// Following two instructions are for _222_mpegaudio
9990 // Spill to obtain 24-bit precision
9991 instruct addFPR24_mem_reg(stackSlotF dst, regFPR src2, memory src1 ) %{
9992   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
9993   match(Set dst (AddF src1 src2));
9994 
9995   format %{ "FADD   $dst,$src1,$src2" %}
9996   opcode(0xD8, 0x0, 0xD9); /* D8 C0+i */  /* LoadF  D9 /0 */
9997   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src1),
9998               OpcReg_FPR(src2),
9999               Pop_Mem_FPR(dst) );
10000   ins_pipe( fpu_mem_reg_mem );
10001 %}
10002 
10003 // Cisc-spill variant
10004 // Spill to obtain 24-bit precision
10005 instruct addFPR24_mem_cisc(stackSlotF dst, memory src1, memory src2) %{
10006   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10007   match(Set dst (AddF src1 (LoadF src2)));
10008 
10009   format %{ "FADD   $dst,$src1,$src2 cisc" %}
10010   opcode(0xD8, 0x0, 0xD9); /* D8 C0+i */  /* LoadF  D9 /0 */
10011   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
10012               set_instruction_start,
10013               OpcP, RMopc_Mem(secondary,src1),
10014               Pop_Mem_FPR(dst) );
10015   ins_pipe( fpu_mem_mem_mem );
10016 %}
10017 
10018 // Spill to obtain 24-bit precision
10019 instruct addFPR24_mem_mem(stackSlotF dst, memory src1, memory src2) %{
10020   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10021   match(Set dst (AddF src1 src2));
10022 
10023   format %{ "FADD   $dst,$src1,$src2" %}
10024   opcode(0xD8, 0x0, 0xD9); /* D8 /0 */  /* LoadF  D9 /0 */
10025   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
10026               set_instruction_start,
10027               OpcP, RMopc_Mem(secondary,src1),
10028               Pop_Mem_FPR(dst) );
10029   ins_pipe( fpu_mem_mem_mem );
10030 %}
10031 
10032 
10033 // Spill to obtain 24-bit precision
10034 instruct addFPR24_reg_imm(stackSlotF dst, regFPR src, immFPR con) %{
10035   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10036   match(Set dst (AddF src con));
10037   format %{ "FLD    $src\n\t"
10038             "FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t"
10039             "FSTP_S $dst"  %}
10040   ins_encode %{
10041     __ fld_s($src$$reg - 1);  // FLD ST(i-1)
10042     __ fadd_s($constantaddress($con));
10043     __ fstp_s(Address(rsp, $dst$$disp));
10044   %}
10045   ins_pipe(fpu_mem_reg_con);
10046 %}
10047 //
10048 // This instruction does not round to 24-bits
10049 instruct addFPR_reg_imm(regFPR dst, regFPR src, immFPR con) %{
10050   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10051   match(Set dst (AddF src con));
10052   format %{ "FLD    $src\n\t"
10053             "FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t"
10054             "FSTP   $dst"  %}
10055   ins_encode %{
10056     __ fld_s($src$$reg - 1);  // FLD ST(i-1)
10057     __ fadd_s($constantaddress($con));
10058     __ fstp_d($dst$$reg);
10059   %}
10060   ins_pipe(fpu_reg_reg_con);
10061 %}
10062 
10063 // Spill to obtain 24-bit precision
10064 instruct mulFPR24_reg(stackSlotF dst, regFPR src1, regFPR src2) %{
10065   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10066   match(Set dst (MulF src1 src2));
10067 
10068   format %{ "FLD    $src1\n\t"
10069             "FMUL   $src2\n\t"
10070             "FSTP_S $dst"  %}
10071   opcode(0xD8, 0x1); /* D8 C8+i or D8 /1 ;; result in TOS */
10072   ins_encode( Push_Reg_FPR(src1),
10073               OpcReg_FPR(src2),
10074               Pop_Mem_FPR(dst) );
10075   ins_pipe( fpu_mem_reg_reg );
10076 %}
10077 //
10078 // This instruction does not round to 24-bits
10079 instruct mulFPR_reg(regFPR dst, regFPR src1, regFPR src2) %{
10080   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10081   match(Set dst (MulF src1 src2));
10082 
10083   format %{ "FLD    $src1\n\t"
10084             "FMUL   $src2\n\t"
10085             "FSTP_S $dst"  %}
10086   opcode(0xD8, 0x1); /* D8 C8+i */
10087   ins_encode( Push_Reg_FPR(src2),
10088               OpcReg_FPR(src1),
10089               Pop_Reg_FPR(dst) );
10090   ins_pipe( fpu_reg_reg_reg );
10091 %}
10092 
10093 
10094 // Spill to obtain 24-bit precision
10095 // Cisc-alternate to reg-reg multiply
10096 instruct mulFPR24_reg_mem(stackSlotF dst, regFPR src1, memory src2) %{
10097   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10098   match(Set dst (MulF src1 (LoadF src2)));
10099 
10100   format %{ "FLD_S  $src2\n\t"
10101             "FMUL   $src1\n\t"
10102             "FSTP_S $dst"  %}
10103   opcode(0xD8, 0x1, 0xD9); /* D8 C8+i or DE /1*/  /* LoadF D9 /0 */
10104   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
10105               OpcReg_FPR(src1),
10106               Pop_Mem_FPR(dst) );
10107   ins_pipe( fpu_mem_reg_mem );
10108 %}
10109 //
10110 // This instruction does not round to 24-bits
10111 // Cisc-alternate to reg-reg multiply
10112 instruct mulFPR_reg_mem(regFPR dst, regFPR src1, memory src2) %{
10113   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10114   match(Set dst (MulF src1 (LoadF src2)));
10115 
10116   format %{ "FMUL   $dst,$src1,$src2" %}
10117   opcode(0xD8, 0x1, 0xD9); /* D8 C8+i */  /* LoadF D9 /0 */
10118   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
10119               OpcReg_FPR(src1),
10120               Pop_Reg_FPR(dst) );
10121   ins_pipe( fpu_reg_reg_mem );
10122 %}
10123 
10124 // Spill to obtain 24-bit precision
10125 instruct mulFPR24_mem_mem(stackSlotF dst, memory src1, memory src2) %{
10126   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10127   match(Set dst (MulF src1 src2));
10128 
10129   format %{ "FMUL   $dst,$src1,$src2" %}
10130   opcode(0xD8, 0x1, 0xD9); /* D8 /1 */  /* LoadF D9 /0 */
10131   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
10132               set_instruction_start,
10133               OpcP, RMopc_Mem(secondary,src1),
10134               Pop_Mem_FPR(dst) );
10135   ins_pipe( fpu_mem_mem_mem );
10136 %}
10137 
10138 // Spill to obtain 24-bit precision
10139 instruct mulFPR24_reg_imm(stackSlotF dst, regFPR src, immFPR con) %{
10140   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10141   match(Set dst (MulF src con));
10142 
10143   format %{ "FLD    $src\n\t"
10144             "FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t"
10145             "FSTP_S $dst"  %}
10146   ins_encode %{
10147     __ fld_s($src$$reg - 1);  // FLD ST(i-1)
10148     __ fmul_s($constantaddress($con));
10149     __ fstp_s(Address(rsp, $dst$$disp));
10150   %}
10151   ins_pipe(fpu_mem_reg_con);
10152 %}
10153 //
10154 // This instruction does not round to 24-bits
10155 instruct mulFPR_reg_imm(regFPR dst, regFPR src, immFPR con) %{
10156   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10157   match(Set dst (MulF src con));
10158 
10159   format %{ "FLD    $src\n\t"
10160             "FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t"
10161             "FSTP   $dst"  %}
10162   ins_encode %{
10163     __ fld_s($src$$reg - 1);  // FLD ST(i-1)
10164     __ fmul_s($constantaddress($con));
10165     __ fstp_d($dst$$reg);
10166   %}
10167   ins_pipe(fpu_reg_reg_con);
10168 %}
10169 
10170 
10171 //
10172 // MACRO1 -- subsume unshared load into mulFPR
10173 // This instruction does not round to 24-bits
10174 instruct mulFPR_reg_load1(regFPR dst, regFPR src, memory mem1 ) %{
10175   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10176   match(Set dst (MulF (LoadF mem1) src));
10177 
10178   format %{ "FLD    $mem1    ===MACRO1===\n\t"
10179             "FMUL   ST,$src\n\t"
10180             "FSTP   $dst" %}
10181   opcode(0xD8, 0x1, 0xD9); /* D8 C8+i or D8 /1 */  /* LoadF D9 /0 */
10182   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,mem1),
10183               OpcReg_FPR(src),
10184               Pop_Reg_FPR(dst) );
10185   ins_pipe( fpu_reg_reg_mem );
10186 %}
10187 //
10188 // MACRO2 -- addFPR a mulFPR which subsumed an unshared load
10189 // This instruction does not round to 24-bits
10190 instruct addFPR_mulFPR_reg_load1(regFPR dst, memory mem1, regFPR src1, regFPR src2) %{
10191   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10192   match(Set dst (AddF (MulF (LoadF mem1) src1) src2));
10193   ins_cost(95);
10194 
10195   format %{ "FLD    $mem1     ===MACRO2===\n\t"
10196             "FMUL   ST,$src1  subsume mulFPR left load\n\t"
10197             "FADD   ST,$src2\n\t"
10198             "FSTP   $dst" %}
10199   opcode(0xD9); /* LoadF D9 /0 */
10200   ins_encode( OpcP, RMopc_Mem(0x00,mem1),
10201               FMul_ST_reg(src1),
10202               FAdd_ST_reg(src2),
10203               Pop_Reg_FPR(dst) );
10204   ins_pipe( fpu_reg_mem_reg_reg );
10205 %}
10206 
10207 // MACRO3 -- addFPR a mulFPR
10208 // This instruction does not round to 24-bits.  It is a '2-address'
10209 // instruction in that the result goes back to src2.  This eliminates
10210 // a move from the macro; possibly the register allocator will have
10211 // to add it back (and maybe not).
10212 instruct addFPR_mulFPR_reg(regFPR src2, regFPR src1, regFPR src0) %{
10213   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10214   match(Set src2 (AddF (MulF src0 src1) src2));
10215 
10216   format %{ "FLD    $src0     ===MACRO3===\n\t"
10217             "FMUL   ST,$src1\n\t"
10218             "FADDP  $src2,ST" %}
10219   opcode(0xD9); /* LoadF D9 /0 */
10220   ins_encode( Push_Reg_FPR(src0),
10221               FMul_ST_reg(src1),
10222               FAddP_reg_ST(src2) );
10223   ins_pipe( fpu_reg_reg_reg );
10224 %}
10225 
10226 // MACRO4 -- divFPR subFPR
10227 // This instruction does not round to 24-bits
10228 instruct subFPR_divFPR_reg(regFPR dst, regFPR src1, regFPR src2, regFPR src3) %{
10229   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10230   match(Set dst (DivF (SubF src2 src1) src3));
10231 
10232   format %{ "FLD    $src2   ===MACRO4===\n\t"
10233             "FSUB   ST,$src1\n\t"
10234             "FDIV   ST,$src3\n\t"
10235             "FSTP  $dst" %}
10236   opcode(0xDE, 0x7); /* DE F8+i or DE /7*/
10237   ins_encode( Push_Reg_FPR(src2),
10238               subFPR_divFPR_encode(src1,src3),
10239               Pop_Reg_FPR(dst) );
10240   ins_pipe( fpu_reg_reg_reg_reg );
10241 %}
10242 
10243 // Spill to obtain 24-bit precision
10244 instruct divFPR24_reg(stackSlotF dst, regFPR src1, regFPR src2) %{
10245   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10246   match(Set dst (DivF src1 src2));
10247 
10248   format %{ "FDIV   $dst,$src1,$src2" %}
10249   opcode(0xD8, 0x6); /* D8 F0+i or DE /6*/
10250   ins_encode( Push_Reg_FPR(src1),
10251               OpcReg_FPR(src2),
10252               Pop_Mem_FPR(dst) );
10253   ins_pipe( fpu_mem_reg_reg );
10254 %}
10255 //
10256 // This instruction does not round to 24-bits
10257 instruct divFPR_reg(regFPR dst, regFPR src) %{
10258   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10259   match(Set dst (DivF dst src));
10260 
10261   format %{ "FDIV   $dst,$src" %}
10262   opcode(0xDE, 0x7); /* DE F8+i or DE /7*/
10263   ins_encode( Push_Reg_FPR(src),
10264               OpcP, RegOpc(dst) );
10265   ins_pipe( fpu_reg_reg );
10266 %}
10267 
10268 
10269 // Spill to obtain 24-bit precision
10270 instruct modFPR24_reg(stackSlotF dst, regFPR src1, regFPR src2, eAXRegI rax, eFlagsReg cr) %{
10271   predicate( UseSSE==0 && Compile::current()->select_24_bit_instr());
10272   match(Set dst (ModF src1 src2));
10273   effect(KILL rax, KILL cr); // emitModDPR() uses EAX and EFLAGS
10274 
10275   format %{ "FMOD   $dst,$src1,$src2" %}
10276   ins_encode( Push_Reg_Mod_DPR(src1, src2),
10277               emitModDPR(),
10278               Push_Result_Mod_DPR(src2),
10279               Pop_Mem_FPR(dst));
10280   ins_pipe( pipe_slow );
10281 %}
10282 //
10283 // This instruction does not round to 24-bits
10284 instruct modFPR_reg(regFPR dst, regFPR src, eAXRegI rax, eFlagsReg cr) %{
10285   predicate( UseSSE==0 && !Compile::current()->select_24_bit_instr());
10286   match(Set dst (ModF dst src));
10287   effect(KILL rax, KILL cr); // emitModDPR() uses EAX and EFLAGS
10288 
10289   format %{ "FMOD   $dst,$src" %}
10290   ins_encode(Push_Reg_Mod_DPR(dst, src),
10291               emitModDPR(),
10292               Push_Result_Mod_DPR(src),
10293               Pop_Reg_FPR(dst));
10294   ins_pipe( pipe_slow );
10295 %}
10296 
10297 instruct modF_reg(regF dst, regF src0, regF src1, eAXRegI rax, eFlagsReg cr) %{
10298   predicate(UseSSE>=1);
10299   match(Set dst (ModF src0 src1));
10300   effect(KILL rax, KILL cr);
10301   format %{ "SUB    ESP,4\t # FMOD\n"
10302           "\tMOVSS  [ESP+0],$src1\n"
10303           "\tFLD_S  [ESP+0]\n"
10304           "\tMOVSS  [ESP+0],$src0\n"
10305           "\tFLD_S  [ESP+0]\n"
10306      "loop:\tFPREM\n"
10307           "\tFWAIT\n"
10308           "\tFNSTSW AX\n"
10309           "\tSAHF\n"
10310           "\tJP     loop\n"
10311           "\tFSTP_S [ESP+0]\n"
10312           "\tMOVSS  $dst,[ESP+0]\n"
10313           "\tADD    ESP,4\n"
10314           "\tFSTP   ST0\t # Restore FPU Stack"
10315     %}
10316   ins_cost(250);
10317   ins_encode( Push_ModF_encoding(src0, src1), emitModDPR(), Push_ResultF(dst,0x4), PopFPU);
10318   ins_pipe( pipe_slow );
10319 %}
10320 
10321 
10322 //----------Arithmetic Conversion Instructions---------------------------------
// The conversion operations are all alpha-sorted.  Please keep it that way!
10324 
10325 instruct roundFloat_mem_reg(stackSlotF dst, regFPR src) %{
10326   predicate(UseSSE==0);
10327   match(Set dst (RoundFloat src));
10328   ins_cost(125);
10329   format %{ "FST_S  $dst,$src\t# F-round" %}
10330   ins_encode( Pop_Mem_Reg_FPR(dst, src) );
10331   ins_pipe( fpu_mem_reg );
10332 %}
10333 
10334 instruct roundDouble_mem_reg(stackSlotD dst, regDPR src) %{
10335   predicate(UseSSE<=1);
10336   match(Set dst (RoundDouble src));
10337   ins_cost(125);
10338   format %{ "FST_D  $dst,$src\t# D-round" %}
10339   ins_encode( Pop_Mem_Reg_DPR(dst, src) );
10340   ins_pipe( fpu_mem_reg );
10341 %}
10342 
// Force rounding to 24-bit precision and 8-bit exponent
10344 instruct convDPR2FPR_reg(stackSlotF dst, regDPR src) %{
10345   predicate(UseSSE==0);
10346   match(Set dst (ConvD2F src));
10347   format %{ "FST_S  $dst,$src\t# F-round" %}
10348   expand %{
10349     roundFloat_mem_reg(dst,src);
10350   %}
10351 %}
10352 
// Force rounding to 24-bit precision and 8-bit exponent
10354 instruct convDPR2F_reg(regF dst, regDPR src, eFlagsReg cr) %{
10355   predicate(UseSSE==1);
10356   match(Set dst (ConvD2F src));
10357   effect( KILL cr );
10358   format %{ "SUB    ESP,4\n\t"
10359             "FST_S  [ESP],$src\t# F-round\n\t"
10360             "MOVSS  $dst,[ESP]\n\t"
10361             "ADD ESP,4" %}
10362   ins_encode %{
10363     __ subptr(rsp, 4);
10364     if ($src$$reg != FPR1L_enc) {
10365       __ fld_s($src$$reg-1);
10366       __ fstp_s(Address(rsp, 0));
10367     } else {
10368       __ fst_s(Address(rsp, 0));
10369     }
10370     __ movflt($dst$$XMMRegister, Address(rsp, 0));
10371     __ addptr(rsp, 4);
10372   %}
10373   ins_pipe( pipe_slow );
10374 %}
10375 
10376 // Force rounding double precision to single precision
10377 instruct convD2F_reg(regF dst, regD src) %{
10378   predicate(UseSSE>=2);
10379   match(Set dst (ConvD2F src));
10380   format %{ "CVTSD2SS $dst,$src\t# F-round" %}
10381   ins_encode %{
10382     __ cvtsd2ss ($dst$$XMMRegister, $src$$XMMRegister);
10383   %}
10384   ins_pipe( pipe_slow );
10385 %}
10386 
10387 instruct convFPR2DPR_reg_reg(regDPR dst, regFPR src) %{
10388   predicate(UseSSE==0);
10389   match(Set dst (ConvF2D src));
10390   format %{ "FST_S  $dst,$src\t# D-round" %}
10391   ins_encode( Pop_Reg_Reg_DPR(dst, src));
10392   ins_pipe( fpu_reg_reg );
10393 %}
10394 
10395 instruct convFPR2D_reg(stackSlotD dst, regFPR src) %{
10396   predicate(UseSSE==1);
10397   match(Set dst (ConvF2D src));
10398   format %{ "FST_D  $dst,$src\t# D-round" %}
10399   expand %{
10400     roundDouble_mem_reg(dst,src);
10401   %}
10402 %}
10403 
10404 instruct convF2DPR_reg(regDPR dst, regF src, eFlagsReg cr) %{
10405   predicate(UseSSE==1);
10406   match(Set dst (ConvF2D src));
10407   effect( KILL cr );
10408   format %{ "SUB    ESP,4\n\t"
10409             "MOVSS  [ESP] $src\n\t"
10410             "FLD_S  [ESP]\n\t"
10411             "ADD    ESP,4\n\t"
10412             "FSTP   $dst\t# D-round" %}
10413   ins_encode %{
10414     __ subptr(rsp, 4);
10415     __ movflt(Address(rsp, 0), $src$$XMMRegister);
10416     __ fld_s(Address(rsp, 0));
10417     __ addptr(rsp, 4);
10418     __ fstp_d($dst$$reg);
10419   %}
10420   ins_pipe( pipe_slow );
10421 %}
10422 
10423 instruct convF2D_reg(regD dst, regF src) %{
10424   predicate(UseSSE>=2);
10425   match(Set dst (ConvF2D src));
10426   format %{ "CVTSS2SD $dst,$src\t# D-round" %}
10427   ins_encode %{
10428     __ cvtss2sd ($dst$$XMMRegister, $src$$XMMRegister);
10429   %}
10430   ins_pipe( pipe_slow );
10431 %}
10432 
10433 // Convert a double to an int.  If the double is a NAN, stuff a zero in instead.
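// Both FIST and CVTTSD2SI write the "integer indefinite" value 0x80000000 for
// NaN or out-of-range inputs, which is why the fast path compares against it
// before falling back to the wrapper stub.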
10434 instruct convDPR2I_reg_reg( eAXRegI dst, eDXRegI tmp, regDPR src, eFlagsReg cr ) %{
10435   predicate(UseSSE<=1);
10436   match(Set dst (ConvD2I src));
10437   effect( KILL tmp, KILL cr );
10438   format %{ "FLD    $src\t# Convert double to int \n\t"
10439             "FLDCW  trunc mode\n\t"
10440             "SUB    ESP,4\n\t"
10441             "FISTp  [ESP + #0]\n\t"
10442             "FLDCW  std/24-bit mode\n\t"
10443             "POP    EAX\n\t"
10444             "CMP    EAX,0x80000000\n\t"
10445             "JNE,s  fast\n\t"
10446             "FLD_D  $src\n\t"
10447             "CALL   d2i_wrapper\n"
10448       "fast:" %}
10449   ins_encode( Push_Reg_DPR(src), DPR2I_encoding(src) );
10450   ins_pipe( pipe_slow );
10451 %}
10452 
10453 // Convert a double to an int.  If the double is a NAN, stuff a zero in instead.
10454 instruct convD2I_reg_reg( eAXRegI dst, eDXRegI tmp, regD src, eFlagsReg cr ) %{
10455   predicate(UseSSE>=2);
10456   match(Set dst (ConvD2I src));
10457   effect( KILL tmp, KILL cr );
10458   format %{ "CVTTSD2SI $dst, $src\n\t"
10459             "CMP    $dst,0x80000000\n\t"
10460             "JNE,s  fast\n\t"
10461             "SUB    ESP, 8\n\t"
10462             "MOVSD  [ESP], $src\n\t"
10463             "FLD_D  [ESP]\n\t"
10464             "ADD    ESP, 8\n\t"
10465             "CALL   d2i_wrapper\n"
10466       "fast:" %}
10467   ins_encode %{
10468     Label fast;
10469     __ cvttsd2sil($dst$$Register, $src$$XMMRegister);
10470     __ cmpl($dst$$Register, 0x80000000);
10471     __ jccb(Assembler::notEqual, fast);
10472     __ subptr(rsp, 8);
10473     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
10474     __ fld_d(Address(rsp, 0));
10475     __ addptr(rsp, 8);
10476     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::d2i_wrapper())));
10477     __ bind(fast);
10478   %}
10479   ins_pipe( pipe_slow );
10480 %}
10481 
10482 instruct convDPR2L_reg_reg( eADXRegL dst, regDPR src, eFlagsReg cr ) %{
10483   predicate(UseSSE<=1);
10484   match(Set dst (ConvD2L src));
10485   effect( KILL cr );
10486   format %{ "FLD    $src\t# Convert double to long\n\t"
10487             "FLDCW  trunc mode\n\t"
10488             "SUB    ESP,8\n\t"
10489             "FISTp  [ESP + #0]\n\t"
10490             "FLDCW  std/24-bit mode\n\t"
10491             "POP    EAX\n\t"
10492             "POP    EDX\n\t"
10493             "CMP    EDX,0x80000000\n\t"
10494             "JNE,s  fast\n\t"
10495             "TEST   EAX,EAX\n\t"
10496             "JNE,s  fast\n\t"
10497             "FLD    $src\n\t"
10498             "CALL   d2l_wrapper\n"
10499       "fast:" %}
10500   ins_encode( Push_Reg_DPR(src),  DPR2L_encoding(src) );
10501   ins_pipe( pipe_slow );
10502 %}
10503 
10504 // XMM lacks a float/double->long conversion, so use the old FPU stack.
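// (In 32-bit mode CVTTSS2SI/CVTTSD2SI can only produce a 32-bit result, so the
// value is spilled to the stack and converted with FISTP, which writes 64 bits.)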
10505 instruct convD2L_reg_reg( eADXRegL dst, regD src, eFlagsReg cr ) %{
10506   predicate (UseSSE>=2);
10507   match(Set dst (ConvD2L src));
10508   effect( KILL cr );
10509   format %{ "SUB    ESP,8\t# Convert double to long\n\t"
10510             "MOVSD  [ESP],$src\n\t"
10511             "FLD_D  [ESP]\n\t"
10512             "FLDCW  trunc mode\n\t"
10513             "FISTp  [ESP + #0]\n\t"
10514             "FLDCW  std/24-bit mode\n\t"
10515             "POP    EAX\n\t"
10516             "POP    EDX\n\t"
10517             "CMP    EDX,0x80000000\n\t"
10518             "JNE,s  fast\n\t"
10519             "TEST   EAX,EAX\n\t"
10520             "JNE,s  fast\n\t"
10521             "SUB    ESP,8\n\t"
10522             "MOVSD  [ESP],$src\n\t"
10523             "FLD_D  [ESP]\n\t"
10524             "ADD    ESP,8\n\t"
10525             "CALL   d2l_wrapper\n"
10526       "fast:" %}
10527   ins_encode %{
10528     Label fast;
10529     __ subptr(rsp, 8);
10530     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
10531     __ fld_d(Address(rsp, 0));
10532     __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
10533     __ fistp_d(Address(rsp, 0));
10534     // Restore the rounding mode, mask the exception
10535     if (Compile::current()->in_24_bit_fp_mode()) {
10536       __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
10537     } else {
10538       __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
10539     }
10540     // Load the converted long, adjust CPU stack
10541     __ pop(rax);
10542     __ pop(rdx);
10543     __ cmpl(rdx, 0x80000000);
10544     __ jccb(Assembler::notEqual, fast);
10545     __ testl(rax, rax);
10546     __ jccb(Assembler::notEqual, fast);
10547     __ subptr(rsp, 8);
10548     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
10549     __ fld_d(Address(rsp, 0));
10550     __ addptr(rsp, 8);
10551     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::d2l_wrapper())));
10552     __ bind(fast);
10553   %}
10554   ins_pipe( pipe_slow );
10555 %}
10556 
10557 // Convert a double to an int.  Java semantics require we do complex
10558 // manglations in the corner cases.  So we set the rounding mode to
10559 // 'zero', store the darned double down as an int, and reset the
10560 // rounding mode to 'nearest'.  The hardware stores a flag value down
// if we would overflow or converted a NaN; we check for this and go the
// slow path if needed.
10563 instruct convFPR2I_reg_reg(eAXRegI dst, eDXRegI tmp, regFPR src, eFlagsReg cr ) %{
10564   predicate(UseSSE==0);
10565   match(Set dst (ConvF2I src));
10566   effect( KILL tmp, KILL cr );
10567   format %{ "FLD    $src\t# Convert float to int \n\t"
10568             "FLDCW  trunc mode\n\t"
10569             "SUB    ESP,4\n\t"
10570             "FISTp  [ESP + #0]\n\t"
10571             "FLDCW  std/24-bit mode\n\t"
10572             "POP    EAX\n\t"
10573             "CMP    EAX,0x80000000\n\t"
10574             "JNE,s  fast\n\t"
10575             "FLD    $src\n\t"
10576             "CALL   d2i_wrapper\n"
10577       "fast:" %}
10578   // DPR2I_encoding works for FPR2I
10579   ins_encode( Push_Reg_FPR(src), DPR2I_encoding(src) );
10580   ins_pipe( pipe_slow );
10581 %}
10582 
10583 // Convert a float in xmm to an int reg.
10584 instruct convF2I_reg(eAXRegI dst, eDXRegI tmp, regF src, eFlagsReg cr ) %{
10585   predicate(UseSSE>=1);
10586   match(Set dst (ConvF2I src));
10587   effect( KILL tmp, KILL cr );
10588   format %{ "CVTTSS2SI $dst, $src\n\t"
10589             "CMP    $dst,0x80000000\n\t"
10590             "JNE,s  fast\n\t"
10591             "SUB    ESP, 4\n\t"
10592             "MOVSS  [ESP], $src\n\t"
10593             "FLD    [ESP]\n\t"
10594             "ADD    ESP, 4\n\t"
10595             "CALL   d2i_wrapper\n"
10596       "fast:" %}
10597   ins_encode %{
10598     Label fast;
10599     __ cvttss2sil($dst$$Register, $src$$XMMRegister);
10600     __ cmpl($dst$$Register, 0x80000000);
10601     __ jccb(Assembler::notEqual, fast);
10602     __ subptr(rsp, 4);
10603     __ movflt(Address(rsp, 0), $src$$XMMRegister);
10604     __ fld_s(Address(rsp, 0));
10605     __ addptr(rsp, 4);
10606     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::d2i_wrapper())));
10607     __ bind(fast);
10608   %}
10609   ins_pipe( pipe_slow );
10610 %}
10611 
10612 instruct convFPR2L_reg_reg( eADXRegL dst, regFPR src, eFlagsReg cr ) %{
10613   predicate(UseSSE==0);
10614   match(Set dst (ConvF2L src));
10615   effect( KILL cr );
10616   format %{ "FLD    $src\t# Convert float to long\n\t"
10617             "FLDCW  trunc mode\n\t"
10618             "SUB    ESP,8\n\t"
10619             "FISTp  [ESP + #0]\n\t"
10620             "FLDCW  std/24-bit mode\n\t"
10621             "POP    EAX\n\t"
10622             "POP    EDX\n\t"
10623             "CMP    EDX,0x80000000\n\t"
10624             "JNE,s  fast\n\t"
10625             "TEST   EAX,EAX\n\t"
10626             "JNE,s  fast\n\t"
10627             "FLD    $src\n\t"
10628             "CALL   d2l_wrapper\n"
10629       "fast:" %}
10630   // DPR2L_encoding works for FPR2L
10631   ins_encode( Push_Reg_FPR(src), DPR2L_encoding(src) );
10632   ins_pipe( pipe_slow );
10633 %}
10634 
10635 // XMM lacks a float/double->long conversion, so use the old FPU stack.
10636 instruct convF2L_reg_reg( eADXRegL dst, regF src, eFlagsReg cr ) %{
10637   predicate (UseSSE>=1);
10638   match(Set dst (ConvF2L src));
10639   effect( KILL cr );
10640   format %{ "SUB    ESP,8\t# Convert float to long\n\t"
10641             "MOVSS  [ESP],$src\n\t"
10642             "FLD_S  [ESP]\n\t"
10643             "FLDCW  trunc mode\n\t"
10644             "FISTp  [ESP + #0]\n\t"
10645             "FLDCW  std/24-bit mode\n\t"
10646             "POP    EAX\n\t"
10647             "POP    EDX\n\t"
10648             "CMP    EDX,0x80000000\n\t"
10649             "JNE,s  fast\n\t"
10650             "TEST   EAX,EAX\n\t"
10651             "JNE,s  fast\n\t"
10652             "SUB    ESP,4\t# Convert float to long\n\t"
10653             "MOVSS  [ESP],$src\n\t"
10654             "FLD_S  [ESP]\n\t"
10655             "ADD    ESP,4\n\t"
10656             "CALL   d2l_wrapper\n"
10657       "fast:" %}
10658   ins_encode %{
10659     Label fast;
10660     __ subptr(rsp, 8);
10661     __ movflt(Address(rsp, 0), $src$$XMMRegister);
10662     __ fld_s(Address(rsp, 0));
10663     __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
10664     __ fistp_d(Address(rsp, 0));
10665     // Restore the rounding mode, mask the exception
10666     if (Compile::current()->in_24_bit_fp_mode()) {
10667       __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
10668     } else {
10669       __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
10670     }
10671     // Load the converted long, adjust CPU stack
10672     __ pop(rax);
10673     __ pop(rdx);
10674     __ cmpl(rdx, 0x80000000);
10675     __ jccb(Assembler::notEqual, fast);
10676     __ testl(rax, rax);
10677     __ jccb(Assembler::notEqual, fast);
10678     __ subptr(rsp, 4);
10679     __ movflt(Address(rsp, 0), $src$$XMMRegister);
10680     __ fld_s(Address(rsp, 0));
10681     __ addptr(rsp, 4);
10682     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::d2l_wrapper())));
10683     __ bind(fast);
10684   %}
10685   ins_pipe( pipe_slow );
10686 %}
10687 
10688 instruct convI2DPR_reg(regDPR dst, stackSlotI src) %{
10689   predicate( UseSSE<=1 );
10690   match(Set dst (ConvI2D src));
10691   format %{ "FILD   $src\n\t"
10692             "FSTP   $dst" %}
10693   opcode(0xDB, 0x0);  /* DB /0 */
10694   ins_encode(Push_Mem_I(src), Pop_Reg_DPR(dst));
10695   ins_pipe( fpu_reg_mem );
10696 %}
10697 
10698 instruct convI2D_reg(regD dst, rRegI src) %{
10699   predicate( UseSSE>=2 && !UseXmmI2D );
10700   match(Set dst (ConvI2D src));
10701   format %{ "CVTSI2SD $dst,$src" %}
10702   ins_encode %{
10703     __ cvtsi2sdl ($dst$$XMMRegister, $src$$Register);
10704   %}
10705   ins_pipe( pipe_slow );
10706 %}
10707 
10708 instruct convI2D_mem(regD dst, memory mem) %{
10709   predicate( UseSSE>=2 );
10710   match(Set dst (ConvI2D (LoadI mem)));
10711   format %{ "CVTSI2SD $dst,$mem" %}
10712   ins_encode %{
10713     __ cvtsi2sdl ($dst$$XMMRegister, $mem$$Address);
10714   %}
10715   ins_pipe( pipe_slow );
10716 %}
10717 
10718 instruct convXI2D_reg(regD dst, rRegI src)
10719 %{
10720   predicate( UseSSE>=2 && UseXmmI2D );
10721   match(Set dst (ConvI2D src));
10722 
10723   format %{ "MOVD  $dst,$src\n\t"
10724             "CVTDQ2PD $dst,$dst\t# i2d" %}
10725   ins_encode %{
10726     __ movdl($dst$$XMMRegister, $src$$Register);
10727     __ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister);
10728   %}
10729   ins_pipe(pipe_slow); // XXX
10730 %}
10731 
10732 instruct convI2DPR_mem(regDPR dst, memory mem) %{
10733   predicate( UseSSE<=1 && !Compile::current()->select_24_bit_instr());
10734   match(Set dst (ConvI2D (LoadI mem)));
10735   format %{ "FILD   $mem\n\t"
10736             "FSTP   $dst" %}
10737   opcode(0xDB);      /* DB /0 */
10738   ins_encode( OpcP, RMopc_Mem(0x00,mem),
10739               Pop_Reg_DPR(dst));
10740   ins_pipe( fpu_reg_mem );
10741 %}
10742 
10743 // Convert a byte to a float; no rounding step needed.
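// (The predicate only accepts values already masked to 0..255, which are
// exactly representable as floats, so no rounding store is required.)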
10744 instruct conv24I2FPR_reg(regFPR dst, stackSlotI src) %{
10745   predicate( UseSSE==0 && n->in(1)->Opcode() == Op_AndI && n->in(1)->in(2)->is_Con() && n->in(1)->in(2)->get_int() == 255 );
10746   match(Set dst (ConvI2F src));
10747   format %{ "FILD   $src\n\t"
10748             "FSTP   $dst" %}
10749 
10750   opcode(0xDB, 0x0);  /* DB /0 */
10751   ins_encode(Push_Mem_I(src), Pop_Reg_FPR(dst));
10752   ins_pipe( fpu_reg_mem );
10753 %}
10754 
10755 // In 24-bit mode, force exponent rounding by storing back out
10756 instruct convI2FPR_SSF(stackSlotF dst, stackSlotI src) %{
10757   predicate( UseSSE==0 && Compile::current()->select_24_bit_instr());
10758   match(Set dst (ConvI2F src));
10759   ins_cost(200);
10760   format %{ "FILD   $src\n\t"
10761             "FSTP_S $dst" %}
10762   opcode(0xDB, 0x0);  /* DB /0 */
10763   ins_encode( Push_Mem_I(src),
10764               Pop_Mem_FPR(dst));
10765   ins_pipe( fpu_mem_mem );
10766 %}
10767 
10768 // In 24-bit mode, force exponent rounding by storing back out
10769 instruct convI2FPR_SSF_mem(stackSlotF dst, memory mem) %{
10770   predicate( UseSSE==0 && Compile::current()->select_24_bit_instr());
10771   match(Set dst (ConvI2F (LoadI mem)));
10772   ins_cost(200);
10773   format %{ "FILD   $mem\n\t"
10774             "FSTP_S $dst" %}
10775   opcode(0xDB);  /* DB /0 */
10776   ins_encode( OpcP, RMopc_Mem(0x00,mem),
10777               Pop_Mem_FPR(dst));
10778   ins_pipe( fpu_mem_mem );
10779 %}
10780 
10781 // This instruction does not round to 24-bits
10782 instruct convI2FPR_reg(regFPR dst, stackSlotI src) %{
10783   predicate( UseSSE==0 && !Compile::current()->select_24_bit_instr());
10784   match(Set dst (ConvI2F src));
10785   format %{ "FILD   $src\n\t"
10786             "FSTP   $dst" %}
10787   opcode(0xDB, 0x0);  /* DB /0 */
10788   ins_encode( Push_Mem_I(src),
10789               Pop_Reg_FPR(dst));
10790   ins_pipe( fpu_reg_mem );
10791 %}
10792 
10793 // This instruction does not round to 24-bits
10794 instruct convI2FPR_mem(regFPR dst, memory mem) %{
10795   predicate( UseSSE==0 && !Compile::current()->select_24_bit_instr());
10796   match(Set dst (ConvI2F (LoadI mem)));
10797   format %{ "FILD   $mem\n\t"
10798             "FSTP   $dst" %}
10799   opcode(0xDB);      /* DB /0 */
10800   ins_encode( OpcP, RMopc_Mem(0x00,mem),
10801               Pop_Reg_FPR(dst));
10802   ins_pipe( fpu_reg_mem );
10803 %}
10804 
10805 // Convert an int to a float in xmm; no rounding step needed.
10806 instruct convI2F_reg(regF dst, rRegI src) %{
10807   predicate( UseSSE==1 || UseSSE>=2 && !UseXmmI2F );
10808   match(Set dst (ConvI2F src));
10809   format %{ "CVTSI2SS $dst, $src" %}
10810   ins_encode %{
10811     __ cvtsi2ssl ($dst$$XMMRegister, $src$$Register);
10812   %}
10813   ins_pipe( pipe_slow );
10814 %}
10815 
instruct convXI2F_reg(regF dst, rRegI src)
10817 %{
10818   predicate( UseSSE>=2 && UseXmmI2F );
10819   match(Set dst (ConvI2F src));
10820 
10821   format %{ "MOVD  $dst,$src\n\t"
10822             "CVTDQ2PS $dst,$dst\t# i2f" %}
10823   ins_encode %{
10824     __ movdl($dst$$XMMRegister, $src$$Register);
10825     __ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister);
10826   %}
10827   ins_pipe(pipe_slow); // XXX
10828 %}
10829 
10830 instruct convI2L_reg( eRegL dst, rRegI src, eFlagsReg cr) %{
10831   match(Set dst (ConvI2L src));
10832   effect(KILL cr);
10833   ins_cost(375);
10834   format %{ "MOV    $dst.lo,$src\n\t"
10835             "MOV    $dst.hi,$src\n\t"
10836             "SAR    $dst.hi,31" %}
10837   ins_encode(convert_int_long(dst,src));
10838   ins_pipe( ialu_reg_reg_long );
10839 %}
10840 
10841 // Zero-extend convert int to long
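// Matching the (AndL (ConvI2L src) 0xFFFFFFFF) shape lets zero-extension be a
// MOV of the low half plus an XOR of the high half, instead of the
// MOV/MOV/SAR sign-extending sequence above.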
10842 instruct convI2L_reg_zex(eRegL dst, rRegI src, immL_32bits mask, eFlagsReg flags ) %{
10843   match(Set dst (AndL (ConvI2L src) mask) );
10844   effect( KILL flags );
10845   ins_cost(250);
10846   format %{ "MOV    $dst.lo,$src\n\t"
10847             "XOR    $dst.hi,$dst.hi" %}
10848   opcode(0x33); // XOR
10849   ins_encode(enc_Copy(dst,src), OpcP, RegReg_Hi2(dst,dst) );
10850   ins_pipe( ialu_reg_reg_long );
10851 %}
10852 
10853 // Zero-extend long
10854 instruct zerox_long(eRegL dst, eRegL src, immL_32bits mask, eFlagsReg flags ) %{
10855   match(Set dst (AndL src mask) );
10856   effect( KILL flags );
10857   ins_cost(250);
10858   format %{ "MOV    $dst.lo,$src.lo\n\t"
10859             "XOR    $dst.hi,$dst.hi\n\t" %}
10860   opcode(0x33); // XOR
10861   ins_encode(enc_Copy(dst,src), OpcP, RegReg_Hi2(dst,dst) );
10862   ins_pipe( ialu_reg_reg_long );
10863 %}
10864 
10865 instruct convL2DPR_reg( stackSlotD dst, eRegL src, eFlagsReg cr) %{
10866   predicate (UseSSE<=1);
10867   match(Set dst (ConvL2D src));
10868   effect( KILL cr );
10869   format %{ "PUSH   $src.hi\t# Convert long to double\n\t"
10870             "PUSH   $src.lo\n\t"
10871             "FILD   ST,[ESP + #0]\n\t"
10872             "ADD    ESP,8\n\t"
10873             "FSTP_D $dst\t# D-round" %}
10874   opcode(0xDF, 0x5);  /* DF /5 */
10875   ins_encode(convert_long_double(src), Pop_Mem_DPR(dst));
10876   ins_pipe( pipe_slow );
10877 %}
10878 
10879 instruct convL2D_reg( regD dst, eRegL src, eFlagsReg cr) %{
10880   predicate (UseSSE>=2);
10881   match(Set dst (ConvL2D src));
10882   effect( KILL cr );
10883   format %{ "PUSH   $src.hi\t# Convert long to double\n\t"
10884             "PUSH   $src.lo\n\t"
10885             "FILD_D [ESP]\n\t"
10886             "FSTP_D [ESP]\n\t"
10887             "MOVSD  $dst,[ESP]\n\t"
10888             "ADD    ESP,8" %}
10889   opcode(0xDF, 0x5);  /* DF /5 */
10890   ins_encode(convert_long_double2(src), Push_ResultD(dst));
10891   ins_pipe( pipe_slow );
10892 %}
10893 
10894 instruct convL2F_reg( regF dst, eRegL src, eFlagsReg cr) %{
10895   predicate (UseSSE>=1);
10896   match(Set dst (ConvL2F src));
10897   effect( KILL cr );
10898   format %{ "PUSH   $src.hi\t# Convert long to single float\n\t"
10899             "PUSH   $src.lo\n\t"
10900             "FILD_D [ESP]\n\t"
10901             "FSTP_S [ESP]\n\t"
10902             "MOVSS  $dst,[ESP]\n\t"
10903             "ADD    ESP,8" %}
10904   opcode(0xDF, 0x5);  /* DF /5 */
10905   ins_encode(convert_long_double2(src), Push_ResultF(dst,0x8));
10906   ins_pipe( pipe_slow );
10907 %}
10908 
10909 instruct convL2FPR_reg( stackSlotF dst, eRegL src, eFlagsReg cr) %{
10910   match(Set dst (ConvL2F src));
10911   effect( KILL cr );
10912   format %{ "PUSH   $src.hi\t# Convert long to single float\n\t"
10913             "PUSH   $src.lo\n\t"
10914             "FILD   ST,[ESP + #0]\n\t"
10915             "ADD    ESP,8\n\t"
10916             "FSTP_S $dst\t# F-round" %}
10917   opcode(0xDF, 0x5);  /* DF /5 */
10918   ins_encode(convert_long_double(src), Pop_Mem_FPR(dst));
10919   ins_pipe( pipe_slow );
10920 %}
10921 
10922 instruct convL2I_reg( rRegI dst, eRegL src ) %{
10923   match(Set dst (ConvL2I src));
10924   effect( DEF dst, USE src );
10925   format %{ "MOV    $dst,$src.lo" %}
10926   ins_encode(enc_CopyL_Lo(dst,src));
10927   ins_pipe( ialu_reg_reg );
10928 %}
10929 
10930 
10931 instruct MoveF2I_stack_reg(rRegI dst, stackSlotF src) %{
10932   match(Set dst (MoveF2I src));
10933   effect( DEF dst, USE src );
10934   ins_cost(100);
10935   format %{ "MOV    $dst,$src\t# MoveF2I_stack_reg" %}
10936   ins_encode %{
10937     __ movl($dst$$Register, Address(rsp, $src$$disp));
10938   %}
10939   ins_pipe( ialu_reg_mem );
10940 %}
10941 
10942 instruct MoveFPR2I_reg_stack(stackSlotI dst, regFPR src) %{
10943   predicate(UseSSE==0);
10944   match(Set dst (MoveF2I src));
10945   effect( DEF dst, USE src );
10946 
10947   ins_cost(125);
10948   format %{ "FST_S  $dst,$src\t# MoveF2I_reg_stack" %}
10949   ins_encode( Pop_Mem_Reg_FPR(dst, src) );
10950   ins_pipe( fpu_mem_reg );
10951 %}
10952 
10953 instruct MoveF2I_reg_stack_sse(stackSlotI dst, regF src) %{
10954   predicate(UseSSE>=1);
10955   match(Set dst (MoveF2I src));
10956   effect( DEF dst, USE src );
10957 
10958   ins_cost(95);
10959   format %{ "MOVSS  $dst,$src\t# MoveF2I_reg_stack_sse" %}
10960   ins_encode %{
10961     __ movflt(Address(rsp, $dst$$disp), $src$$XMMRegister);
10962   %}
10963   ins_pipe( pipe_slow );
10964 %}
10965 
10966 instruct MoveF2I_reg_reg_sse(rRegI dst, regF src) %{
10967   predicate(UseSSE>=2);
10968   match(Set dst (MoveF2I src));
10969   effect( DEF dst, USE src );
10970   ins_cost(85);
10971   format %{ "MOVD   $dst,$src\t# MoveF2I_reg_reg_sse" %}
10972   ins_encode %{
10973     __ movdl($dst$$Register, $src$$XMMRegister);
10974   %}
10975   ins_pipe( pipe_slow );
10976 %}
10977 
10978 instruct MoveI2F_reg_stack(stackSlotF dst, rRegI src) %{
10979   match(Set dst (MoveI2F src));
10980   effect( DEF dst, USE src );
10981 
10982   ins_cost(100);
10983   format %{ "MOV    $dst,$src\t# MoveI2F_reg_stack" %}
10984   ins_encode %{
10985     __ movl(Address(rsp, $dst$$disp), $src$$Register);
10986   %}
10987   ins_pipe( ialu_mem_reg );
10988 %}
10989 
10990 
10991 instruct MoveI2FPR_stack_reg(regFPR dst, stackSlotI src) %{
10992   predicate(UseSSE==0);
10993   match(Set dst (MoveI2F src));
10994   effect(DEF dst, USE src);
10995 
10996   ins_cost(125);
10997   format %{ "FLD_S  $src\n\t"
10998             "FSTP   $dst\t# MoveI2F_stack_reg" %}
10999   opcode(0xD9);               /* D9 /0, FLD m32real */
11000   ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
11001               Pop_Reg_FPR(dst) );
11002   ins_pipe( fpu_reg_mem );
11003 %}
11004 
11005 instruct MoveI2F_stack_reg_sse(regF dst, stackSlotI src) %{
11006   predicate(UseSSE>=1);
11007   match(Set dst (MoveI2F src));
11008   effect( DEF dst, USE src );
11009 
11010   ins_cost(95);
11011   format %{ "MOVSS  $dst,$src\t# MoveI2F_stack_reg_sse" %}
11012   ins_encode %{
11013     __ movflt($dst$$XMMRegister, Address(rsp, $src$$disp));
11014   %}
11015   ins_pipe( pipe_slow );
11016 %}
11017 
11018 instruct MoveI2F_reg_reg_sse(regF dst, rRegI src) %{
11019   predicate(UseSSE>=2);
11020   match(Set dst (MoveI2F src));
11021   effect( DEF dst, USE src );
11022 
11023   ins_cost(85);
11024   format %{ "MOVD   $dst,$src\t# MoveI2F_reg_reg_sse" %}
11025   ins_encode %{
11026     __ movdl($dst$$XMMRegister, $src$$Register);
11027   %}
11028   ins_pipe( pipe_slow );
11029 %}
11030 
11031 instruct MoveD2L_stack_reg(eRegL dst, stackSlotD src) %{
11032   match(Set dst (MoveD2L src));
11033   effect(DEF dst, USE src);
11034 
11035   ins_cost(250);
11036   format %{ "MOV    $dst.lo,$src\n\t"
11037             "MOV    $dst.hi,$src+4\t# MoveD2L_stack_reg" %}
11038   opcode(0x8B, 0x8B);
11039   ins_encode( OpcP, RegMem(dst,src), OpcS, RegMem_Hi(dst,src));
11040   ins_pipe( ialu_mem_long_reg );
11041 %}
11042 
11043 instruct MoveDPR2L_reg_stack(stackSlotL dst, regDPR src) %{
11044   predicate(UseSSE<=1);
11045   match(Set dst (MoveD2L src));
11046   effect(DEF dst, USE src);
11047 
11048   ins_cost(125);
11049   format %{ "FST_D  $dst,$src\t# MoveD2L_reg_stack" %}
11050   ins_encode( Pop_Mem_Reg_DPR(dst, src) );
11051   ins_pipe( fpu_mem_reg );
11052 %}
11053 
11054 instruct MoveD2L_reg_stack_sse(stackSlotL dst, regD src) %{
11055   predicate(UseSSE>=2);
11056   match(Set dst (MoveD2L src));
11057   effect(DEF dst, USE src);
11058   ins_cost(95);
11059   format %{ "MOVSD  $dst,$src\t# MoveD2L_reg_stack_sse" %}
11060   ins_encode %{
11061     __ movdbl(Address(rsp, $dst$$disp), $src$$XMMRegister);
11062   %}
11063   ins_pipe( pipe_slow );
11064 %}
11065 
11066 instruct MoveD2L_reg_reg_sse(eRegL dst, regD src, regD tmp) %{
11067   predicate(UseSSE>=2);
11068   match(Set dst (MoveD2L src));
11069   effect(DEF dst, USE src, TEMP tmp);
11070   ins_cost(85);
11071   format %{ "MOVD   $dst.lo,$src\n\t"
11072             "PSHUFLW $tmp,$src,0x4E\n\t"
11073             "MOVD   $dst.hi,$tmp\t# MoveD2L_reg_reg_sse" %}
11074   ins_encode %{
11075     __ movdl($dst$$Register, $src$$XMMRegister);
11076     __ pshuflw($tmp$$XMMRegister, $src$$XMMRegister, 0x4e);
11077     __ movdl(HIGH_FROM_LOW($dst$$Register), $tmp$$XMMRegister);
11078   %}
11079   ins_pipe( pipe_slow );
11080 %}
11081 
11082 instruct MoveL2D_reg_stack(stackSlotD dst, eRegL src) %{
11083   match(Set dst (MoveL2D src));
11084   effect(DEF dst, USE src);
11085 
11086   ins_cost(200);
11087   format %{ "MOV    $dst,$src.lo\n\t"
11088             "MOV    $dst+4,$src.hi\t# MoveL2D_reg_stack" %}
11089   opcode(0x89, 0x89);
11090   ins_encode( OpcP, RegMem( src, dst ), OpcS, RegMem_Hi( src, dst ) );
11091   ins_pipe( ialu_mem_long_reg );
11092 %}
11093 
11094 
11095 instruct MoveL2DPR_stack_reg(regDPR dst, stackSlotL src) %{
11096   predicate(UseSSE<=1);
11097   match(Set dst (MoveL2D src));
11098   effect(DEF dst, USE src);
11099   ins_cost(125);
11100 
11101   format %{ "FLD_D  $src\n\t"
11102             "FSTP   $dst\t# MoveL2D_stack_reg" %}
11103   opcode(0xDD);               /* DD /0, FLD m64real */
11104   ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
11105               Pop_Reg_DPR(dst) );
11106   ins_pipe( fpu_reg_mem );
11107 %}
11108 
11109 
11110 instruct MoveL2D_stack_reg_sse(regD dst, stackSlotL src) %{
11111   predicate(UseSSE>=2 && UseXmmLoadAndClearUpper);
11112   match(Set dst (MoveL2D src));
11113   effect(DEF dst, USE src);
11114 
11115   ins_cost(95);
11116   format %{ "MOVSD  $dst,$src\t# MoveL2D_stack_reg_sse" %}
11117   ins_encode %{
11118     __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp));
11119   %}
11120   ins_pipe( pipe_slow );
11121 %}
11122 
11123 instruct MoveL2D_stack_reg_sse_partial(regD dst, stackSlotL src) %{
11124   predicate(UseSSE>=2 && !UseXmmLoadAndClearUpper);
11125   match(Set dst (MoveL2D src));
11126   effect(DEF dst, USE src);
11127 
11128   ins_cost(95);
11129   format %{ "MOVLPD $dst,$src\t# MoveL2D_stack_reg_sse" %}
11130   ins_encode %{
11131     __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp));
11132   %}
11133   ins_pipe( pipe_slow );
11134 %}
11135 
11136 instruct MoveL2D_reg_reg_sse(regD dst, eRegL src, regD tmp) %{
11137   predicate(UseSSE>=2);
11138   match(Set dst (MoveL2D src));
11139   effect(TEMP dst, USE src, TEMP tmp);
11140   ins_cost(85);
11141   format %{ "MOVD   $dst,$src.lo\n\t"
11142             "MOVD   $tmp,$src.hi\n\t"
11143             "PUNPCKLDQ $dst,$tmp\t# MoveL2D_reg_reg_sse" %}
11144   ins_encode %{
11145     __ movdl($dst$$XMMRegister, $src$$Register);
11146     __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
11147     __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
11148   %}
11149   ins_pipe( pipe_slow );
11150 %}
11151 
11152 
11153 // =======================================================================
11154 // fast clearing of an array
11155 instruct rep_stos(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
11156   predicate(!UseFastStosb);
11157   match(Set dummy (ClearArray cnt base));
11158   effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
11159   format %{ "XOR    EAX,EAX\t# ClearArray:\n\t"
11160             "SHL    ECX,1\t# Convert doublewords to words\n\t"
11161             "REP STOS\t# store EAX into [EDI++] while ECX--" %}
11162   ins_encode %{ 
11163     __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register);
11164   %}
11165   ins_pipe( pipe_slow );
11166 %}
11167 
11168 instruct rep_fast_stosb(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
11169   predicate(UseFastStosb);
11170   match(Set dummy (ClearArray cnt base));
11171   effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
11172   format %{ "XOR    EAX,EAX\t# ClearArray:\n\t"
11173             "SHL    ECX,3\t# Convert doublewords to bytes\n\t"
11174             "REP STOSB\t# store EAX into [EDI++] while ECX--" %}
11175   ins_encode %{ 
11176     __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register);
11177   %}
11178   ins_pipe( pipe_slow );
11179 %}
11180 
11181 instruct string_compare(eDIRegP str1, eCXRegI cnt1, eSIRegP str2, eDXRegI cnt2,
11182                         eAXRegI result, regD tmp1, eFlagsReg cr) %{
11183   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
11184   effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
11185 
11186   format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp1" %}
11187   ins_encode %{
11188     __ string_compare($str1$$Register, $str2$$Register,
11189                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
11190                       $tmp1$$XMMRegister);
11191   %}
11192   ins_pipe( pipe_slow );
11193 %}
11194 
11195 // fast string equals
11196 instruct string_equals(eDIRegP str1, eSIRegP str2, eCXRegI cnt, eAXRegI result,
11197                        regD tmp1, regD tmp2, eBXRegI tmp3, eFlagsReg cr) %{
11198   match(Set result (StrEquals (Binary str1 str2) cnt));
11199   effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp3, KILL cr);
11200 
11201   format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp1, $tmp2, $tmp3" %}
11202   ins_encode %{
11203     __ char_arrays_equals(false, $str1$$Register, $str2$$Register,
11204                           $cnt$$Register, $result$$Register, $tmp3$$Register,
11205                           $tmp1$$XMMRegister, $tmp2$$XMMRegister);
11206   %}
11207   ins_pipe( pipe_slow );
11208 %}
11209 
11210 // fast search of substring with known size.
11211 instruct string_indexof_con(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_cnt2,
11212                             eBXRegI result, regD vec, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
11213   predicate(UseSSE42Intrinsics);
11214   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
11215   effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
11216 
11217   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result   // KILL $vec, $cnt1, $cnt2, $tmp" %}
11218   ins_encode %{
11219     int icnt2 = (int)$int_cnt2$$constant;
11220     if (icnt2 >= 8) {
      // IndexOf for constant substrings with size >= 8 elements,
      // which don't need to be loaded through the stack.
11223       __ string_indexofC8($str1$$Register, $str2$$Register,
11224                           $cnt1$$Register, $cnt2$$Register,
11225                           icnt2, $result$$Register,
11226                           $vec$$XMMRegister, $tmp$$Register);
11227     } else {
      // Small strings are loaded through the stack if they cross a page boundary.
11229       __ string_indexof($str1$$Register, $str2$$Register,
11230                         $cnt1$$Register, $cnt2$$Register,
11231                         icnt2, $result$$Register,
11232                         $vec$$XMMRegister, $tmp$$Register);
11233     }
11234   %}
11235   ins_pipe( pipe_slow );
11236 %}
11237 
11238 instruct string_indexof(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, eAXRegI cnt2,
11239                         eBXRegI result, regD vec, eCXRegI tmp, eFlagsReg cr) %{
11240   predicate(UseSSE42Intrinsics);
11241   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
11242   effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
11243 
11244   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result   // KILL all" %}
11245   ins_encode %{
11246     __ string_indexof($str1$$Register, $str2$$Register,
11247                       $cnt1$$Register, $cnt2$$Register,
11248                       (-1), $result$$Register,
11249                       $vec$$XMMRegister, $tmp$$Register);
11250   %}
11251   ins_pipe( pipe_slow );
11252 %}
11253 
11254 // fast array equals
11255 instruct array_equals(eDIRegP ary1, eSIRegP ary2, eAXRegI result,
11256                       regD tmp1, regD tmp2, eCXRegI tmp3, eBXRegI tmp4, eFlagsReg cr)
11257 %{
11258   match(Set result (AryEq ary1 ary2));
11259   effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr);
11260   //ins_cost(300);
11261 
11262   format %{ "Array Equals $ary1,$ary2 -> $result   // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
11263   ins_encode %{
11264     __ char_arrays_equals(true, $ary1$$Register, $ary2$$Register,
11265                           $tmp3$$Register, $result$$Register, $tmp4$$Register,
11266                           $tmp1$$XMMRegister, $tmp2$$XMMRegister);
11267   %}
11268   ins_pipe( pipe_slow );
11269 %}
11270 
11271 // encode char[] to byte[] in ISO_8859_1
11272 instruct encode_iso_array(eSIRegP src, eDIRegP dst, eDXRegI len,
11273                           regD tmp1, regD tmp2, regD tmp3, regD tmp4,
11274                           eCXRegI tmp5, eAXRegI result, eFlagsReg cr) %{
11275   match(Set result (EncodeISOArray src (Binary dst len)));
11276   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr);
11277 
11278   format %{ "Encode array $src,$dst,$len -> $result    // KILL ECX, EDX, $tmp1, $tmp2, $tmp3, $tmp4, ESI, EDI " %}
11279   ins_encode %{
11280     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
11281                         $tmp1$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister,
11282                         $tmp4$$XMMRegister, $tmp5$$Register, $result$$Register);
11283   %}
11284   ins_pipe( pipe_slow );
11285 %}
11286 
11287 
11288 //----------Control Flow Instructions------------------------------------------
11289 // Signed compare Instructions
11290 instruct compI_eReg(eFlagsReg cr, rRegI op1, rRegI op2) %{
11291   match(Set cr (CmpI op1 op2));
11292   effect( DEF cr, USE op1, USE op2 );
11293   format %{ "CMP    $op1,$op2" %}
11294   opcode(0x3B);  /* Opcode 3B /r */
11295   ins_encode( OpcP, RegReg( op1, op2) );
11296   ins_pipe( ialu_cr_reg_reg );
11297 %}
11298 
11299 instruct compI_eReg_imm(eFlagsReg cr, rRegI op1, immI op2) %{
11300   match(Set cr (CmpI op1 op2));
11301   effect( DEF cr, USE op1 );
11302   format %{ "CMP    $op1,$op2" %}
11303   opcode(0x81,0x07);  /* Opcode 81 /7 */
11304   // ins_encode( RegImm( op1, op2) );  /* Was CmpImm */
11305   ins_encode( OpcSErm( op1, op2 ), Con8or32( op2 ) );
11306   ins_pipe( ialu_cr_reg_imm );
11307 %}
11308 
11309 // Cisc-spilled version of cmpI_eReg
11310 instruct compI_eReg_mem(eFlagsReg cr, rRegI op1, memory op2) %{
11311   match(Set cr (CmpI op1 (LoadI op2)));
11312 
11313   format %{ "CMP    $op1,$op2" %}
11314   ins_cost(500);
11315   opcode(0x3B);  /* Opcode 3B /r */
11316   ins_encode( OpcP, RegMem( op1, op2) );
11317   ins_pipe( ialu_cr_reg_mem );
11318 %}
11319 
11320 instruct testI_reg( eFlagsReg cr, rRegI src, immI0 zero ) %{
11321   match(Set cr (CmpI src zero));
11322   effect( DEF cr, USE src );
11323 
11324   format %{ "TEST   $src,$src" %}
11325   opcode(0x85);
11326   ins_encode( OpcP, RegReg( src, src ) );
11327   ins_pipe( ialu_cr_reg_imm );
11328 %}
11329 
11330 instruct testI_reg_imm( eFlagsReg cr, rRegI src, immI con, immI0 zero ) %{
11331   match(Set cr (CmpI (AndI src con) zero));
11332 
11333   format %{ "TEST   $src,$con" %}
11334   opcode(0xF7,0x00);
11335   ins_encode( OpcP, RegOpc(src), Con32(con) );
11336   ins_pipe( ialu_cr_reg_imm );
11337 %}
11338 
11339 instruct testI_reg_mem( eFlagsReg cr, rRegI src, memory mem, immI0 zero ) %{
11340   match(Set cr (CmpI (AndI src mem) zero));
11341 
11342   format %{ "TEST   $src,$mem" %}
11343   opcode(0x85);
11344   ins_encode( OpcP, RegMem( src, mem ) );
11345   ins_pipe( ialu_cr_reg_mem );
11346 %}
11347 
11348 // Unsigned compare Instructions; really, same as signed except they
11349 // produce an eFlagsRegU instead of eFlagsReg.
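// For example, CMP of 0x80000000 against 0x00000001 leaves SF != OF but CF = 0,
// so a signed consumer (JL) sees "less" while an unsigned consumer (JA) sees
// "above"; the eFlagsRegU operand class only tells the matcher which family of
// branch/cmov users may consume the result.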
11350 instruct compU_eReg(eFlagsRegU cr, rRegI op1, rRegI op2) %{
11351   match(Set cr (CmpU op1 op2));
11352 
11353   format %{ "CMPu   $op1,$op2" %}
11354   opcode(0x3B);  /* Opcode 3B /r */
11355   ins_encode( OpcP, RegReg( op1, op2) );
11356   ins_pipe( ialu_cr_reg_reg );
11357 %}
11358 
11359 instruct compU_eReg_imm(eFlagsRegU cr, rRegI op1, immI op2) %{
11360   match(Set cr (CmpU op1 op2));
11361 
11362   format %{ "CMPu   $op1,$op2" %}
11363   opcode(0x81,0x07);  /* Opcode 81 /7 */
11364   ins_encode( OpcSErm( op1, op2 ), Con8or32( op2 ) );
11365   ins_pipe( ialu_cr_reg_imm );
11366 %}
11367 
11368 // // Cisc-spilled version of cmpU_eReg
11369 instruct compU_eReg_mem(eFlagsRegU cr, rRegI op1, memory op2) %{
11370   match(Set cr (CmpU op1 (LoadI op2)));
11371 
11372   format %{ "CMPu   $op1,$op2" %}
11373   ins_cost(500);
11374   opcode(0x3B);  /* Opcode 3B /r */
11375   ins_encode( OpcP, RegMem( op1, op2) );
11376   ins_pipe( ialu_cr_reg_mem );
11377 %}
11378 
11379 // // Cisc-spilled version of cmpU_eReg
11380 //instruct compU_mem_eReg(eFlagsRegU cr, memory op1, rRegI op2) %{
11381 //  match(Set cr (CmpU (LoadI op1) op2));
11382 //
11383 //  format %{ "CMPu   $op1,$op2" %}
11384 //  ins_cost(500);
11385 //  opcode(0x39);  /* Opcode 39 /r */
11386 //  ins_encode( OpcP, RegMem( op1, op2) );
11387 //%}
11388 
11389 instruct testU_reg( eFlagsRegU cr, rRegI src, immI0 zero ) %{
11390   match(Set cr (CmpU src zero));
11391 
11392   format %{ "TESTu  $src,$src" %}
11393   opcode(0x85);
11394   ins_encode( OpcP, RegReg( src, src ) );
11395   ins_pipe( ialu_cr_reg_imm );
11396 %}
11397 
11398 // Unsigned pointer compare Instructions
11399 instruct compP_eReg(eFlagsRegU cr, eRegP op1, eRegP op2) %{
11400   match(Set cr (CmpP op1 op2));
11401 
11402   format %{ "CMPu   $op1,$op2" %}
11403   opcode(0x3B);  /* Opcode 3B /r */
11404   ins_encode( OpcP, RegReg( op1, op2) );
11405   ins_pipe( ialu_cr_reg_reg );
11406 %}
11407 
11408 instruct compP_eReg_imm(eFlagsRegU cr, eRegP op1, immP op2) %{
11409   match(Set cr (CmpP op1 op2));
11410 
11411   format %{ "CMPu   $op1,$op2" %}
11412   opcode(0x81,0x07);  /* Opcode 81 /7 */
11413   ins_encode( OpcSErm( op1, op2 ), Con8or32( op2 ) );
11414   ins_pipe( ialu_cr_reg_imm );
11415 %}
11416 
11417 // // Cisc-spilled version of cmpP_eReg
11418 instruct compP_eReg_mem(eFlagsRegU cr, eRegP op1, memory op2) %{
11419   match(Set cr (CmpP op1 (LoadP op2)));
11420 
11421   format %{ "CMPu   $op1,$op2" %}
11422   ins_cost(500);
11423   opcode(0x3B);  /* Opcode 3B /r */
11424   ins_encode( OpcP, RegMem( op1, op2) );
11425   ins_pipe( ialu_cr_reg_mem );
11426 %}
11427 
11428 // // Cisc-spilled version of cmpP_eReg
11429 //instruct compP_mem_eReg(eFlagsRegU cr, memory op1, eRegP op2) %{
11430 //  match(Set cr (CmpP (LoadP op1) op2));
11431 //
11432 //  format %{ "CMPu   $op1,$op2" %}
11433 //  ins_cost(500);
11434 //  opcode(0x39);  /* Opcode 39 /r */
11435 //  ins_encode( OpcP, RegMem( op1, op2) );
11436 //%}
11437 
11438 // Compare raw pointer (used in out-of-heap check).
11439 // Only works because non-oop pointers must be raw pointers
11440 // and raw pointers have no anti-dependencies.
11441 instruct compP_mem_eReg( eFlagsRegU cr, eRegP op1, memory op2 ) %{
11442   predicate( n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none );
11443   match(Set cr (CmpP op1 (LoadP op2)));
11444 
11445   format %{ "CMPu   $op1,$op2" %}
11446   opcode(0x3B);  /* Opcode 3B /r */
11447   ins_encode( OpcP, RegMem( op1, op2) );
11448   ins_pipe( ialu_cr_reg_mem );
11449 %}
11450 
11451 //
11452 // This will generate a signed flags result. This should be ok
11453 // since any compare to a zero should be eq/neq.
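// (TEST reg,reg only yields a meaningful ZF here: the sign flag reflects the
//  high address bit, which carries no ordering information for pointers.)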
11454 instruct testP_reg( eFlagsReg cr, eRegP src, immP0 zero ) %{
11455   match(Set cr (CmpP src zero));
11456 
11457   format %{ "TEST   $src,$src" %}
11458   opcode(0x85);
11459   ins_encode( OpcP, RegReg( src, src ) );
11460   ins_pipe( ialu_cr_reg_imm );
11461 %}
11462 
11463 // Cisc-spilled version of testP_reg
11464 // This will generate a signed flags result. This should be ok
11465 // since any compare to a zero should be eq/neq.
11466 instruct testP_Reg_mem( eFlagsReg cr, memory op, immI0 zero ) %{
11467   match(Set cr (CmpP (LoadP op) zero));
11468 
11469   format %{ "TEST   $op,0xFFFFFFFF" %}
11470   ins_cost(500);
11471   opcode(0xF7);               /* Opcode F7 /0 */
11472   ins_encode( OpcP, RMopc_Mem(0x00,op), Con_d32(0xFFFFFFFF) );
11473   ins_pipe( ialu_cr_reg_imm );
11474 %}
11475 
11476 // Yanked all unsigned pointer compare operations.
11477 // Pointer compares are done with CmpP which is already unsigned.
11478 
11479 //----------Max and Min--------------------------------------------------------
11480 // Min Instructions
11481 ////
11482 //   *** Min and Max using the conditional move are slower than the
11483 //   *** branch version on a Pentium III.
11484 // // Conditional move for min
11485 //instruct cmovI_reg_lt( rRegI op2, rRegI op1, eFlagsReg cr ) %{
11486 //  effect( USE_DEF op2, USE op1, USE cr );
11487 //  format %{ "CMOVlt $op2,$op1\t! min" %}
11488 //  opcode(0x4C,0x0F);
11489 //  ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
11490 //  ins_pipe( pipe_cmov_reg );
11491 //%}
11492 //
11493 //// Min Register with Register (P6 version)
11494 //instruct minI_eReg_p6( rRegI op1, rRegI op2 ) %{
11495 //  predicate(VM_Version::supports_cmov() );
11496 //  match(Set op2 (MinI op1 op2));
11497 //  ins_cost(200);
11498 //  expand %{
11499 //    eFlagsReg cr;
11500 //    compI_eReg(cr,op1,op2);
11501 //    cmovI_reg_lt(op2,op1,cr);
11502 //  %}
11503 //%}
11504 
11505 // Min Register with Register (generic version)
11506 instruct minI_eReg(rRegI dst, rRegI src, eFlagsReg flags) %{
11507   match(Set dst (MinI dst src));
11508   effect(KILL flags);
11509   ins_cost(300);
11510 
11511   format %{ "MIN    $dst,$src" %}
11512   opcode(0xCC);
11513   ins_encode( min_enc(dst,src) );
11514   ins_pipe( pipe_slow );
11515 %}
11516 
11517 // Max Register with Register
11518 //   *** Min and Max using the conditional move are slower than the
11519 //   *** branch version on a Pentium III.
11520 // // Conditional move for max
11521 //instruct cmovI_reg_gt( rRegI op2, rRegI op1, eFlagsReg cr ) %{
11522 //  effect( USE_DEF op2, USE op1, USE cr );
11523 //  format %{ "CMOVgt $op2,$op1\t! max" %}
11524 //  opcode(0x4F,0x0F);
11525 //  ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
11526 //  ins_pipe( pipe_cmov_reg );
11527 //%}
11528 //
11529 // // Max Register with Register (P6 version)
11530 //instruct maxI_eReg_p6( rRegI op1, rRegI op2 ) %{
11531 //  predicate(VM_Version::supports_cmov() );
11532 //  match(Set op2 (MaxI op1 op2));
11533 //  ins_cost(200);
11534 //  expand %{
11535 //    eFlagsReg cr;
11536 //    compI_eReg(cr,op1,op2);
11537 //    cmovI_reg_gt(op2,op1,cr);
11538 //  %}
11539 //%}
11540 
11541 // Max Register with Register (generic version)
11542 instruct maxI_eReg(rRegI dst, rRegI src, eFlagsReg flags) %{
11543   match(Set dst (MaxI dst src));
11544   effect(KILL flags);
11545   ins_cost(300);
11546 
11547   format %{ "MAX    $dst,$src" %}
11548   opcode(0xCC);
11549   ins_encode( max_enc(dst,src) );
11550   ins_pipe( pipe_slow );
11551 %}
11552 
11553 // ============================================================================
// Counted Loop limit node which represents the exact final iterator value.
// Note: the resulting value should fit into the integer range since
// counted loops have a limit check on overflow.
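// Illustrative example: init=0, limit=10, stride=3 gives
// (10 - 0 + 3 - 1) / 3 = 4 iterations, so the exact final iterator value is
// 0 + 3 * 4 = 12, the first iterator value past the original limit.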
11557 instruct loopLimit_eReg(eAXRegI limit, nadxRegI init, immI stride, eDXRegI limit_hi, nadxRegI tmp, eFlagsReg flags) %{
11558   match(Set limit (LoopLimit (Binary init limit) stride));
11559   effect(TEMP limit_hi, TEMP tmp, KILL flags);
11560   ins_cost(300);
11561 
  format %{ "loopLimit $init,$limit,$stride  # $limit = $init + $stride * (($limit - $init + $stride - 1) / $stride), kills $limit_hi" %}
11563   ins_encode %{
11564     int strd = (int)$stride$$constant;
11565     assert(strd != 1 && strd != -1, "sanity");
11566     int m1 = (strd > 0) ? 1 : -1;
11567     // Convert limit to long (EAX:EDX)
11568     __ cdql();
11569     // Convert init to long (init:tmp)
11570     __ movl($tmp$$Register, $init$$Register);
11571     __ sarl($tmp$$Register, 31);
11572     // $limit - $init
11573     __ subl($limit$$Register, $init$$Register);
11574     __ sbbl($limit_hi$$Register, $tmp$$Register);
11575     // + ($stride - 1)
11576     if (strd > 0) {
11577       __ addl($limit$$Register, (strd - 1));
11578       __ adcl($limit_hi$$Register, 0);
11579       __ movl($tmp$$Register, strd);
11580     } else {
11581       __ addl($limit$$Register, (strd + 1));
11582       __ adcl($limit_hi$$Register, -1);
11583       __ lneg($limit_hi$$Register, $limit$$Register);
11584       __ movl($tmp$$Register, -strd);
11585     }
    // signed division: (EAX:EDX) / pos_stride
11587     __ idivl($tmp$$Register);
11588     if (strd < 0) {
11589       // restore sign
11590       __ negl($tmp$$Register);
11591     }
11592     // (EAX) * stride
11593     __ mull($tmp$$Register);
11594     // + init (ignore upper bits)
11595     __ addl($limit$$Register, $init$$Register);
11596   %}
11597   ins_pipe( pipe_slow );
11598 %}
11599 
11600 // ============================================================================
11601 // Branch Instructions
11602 // Jump Table
11603 instruct jumpXtnd(rRegI switch_val) %{
11604   match(Jump switch_val);
11605   ins_cost(350);
11606   format %{  "JMP    [$constantaddress](,$switch_val,1)\n\t" %}
11607   ins_encode %{
11608     // Jump to Address(table_base + switch_reg)
11609     Address index(noreg, $switch_val$$Register, Address::times_1);
11610     __ jump(ArrayAddress($constantaddress, index));
11611   %}
11612   ins_pipe(pipe_jmp);
11613 %}
11614 
11615 // Jump Direct - Label defines a relative address from JMP+1
11616 instruct jmpDir(label labl) %{
11617   match(Goto);
11618   effect(USE labl);
11619 
11620   ins_cost(300);
11621   format %{ "JMP    $labl" %}
11622   size(5);
11623   ins_encode %{
11624     Label* L = $labl$$label;
11625     __ jmp(*L, false); // Always long jump
11626   %}
11627   ins_pipe( pipe_jmp );
11628 %}
11629 
11630 // Jump Direct Conditional - Label defines a relative address from Jcc+1
11631 instruct jmpCon(cmpOp cop, eFlagsReg cr, label labl) %{
11632   match(If cop cr);
11633   effect(USE labl);
11634 
11635   ins_cost(300);
11636   format %{ "J$cop    $labl" %}
11637   size(6);
11638   ins_encode %{
11639     Label* L = $labl$$label;
11640     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11641   %}
11642   ins_pipe( pipe_jcc );
11643 %}
11644 
11645 // Jump Direct Conditional - Label defines a relative address from Jcc+1
11646 instruct jmpLoopEnd(cmpOp cop, eFlagsReg cr, label labl) %{
11647   match(CountedLoopEnd cop cr);
11648   effect(USE labl);
11649 
11650   ins_cost(300);
11651   format %{ "J$cop    $labl\t# Loop end" %}
11652   size(6);
11653   ins_encode %{
11654     Label* L = $labl$$label;
11655     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11656   %}
11657   ins_pipe( pipe_jcc );
11658 %}
11659 
11660 // Jump Direct Conditional - Label defines a relative address from Jcc+1
11661 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
11662   match(CountedLoopEnd cop cmp);
11663   effect(USE labl);
11664 
11665   ins_cost(300);
11666   format %{ "J$cop,u  $labl\t# Loop end" %}
11667   size(6);
11668   ins_encode %{
11669     Label* L = $labl$$label;
11670     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11671   %}
11672   ins_pipe( pipe_jcc );
11673 %}
11674 
11675 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
11676   match(CountedLoopEnd cop cmp);
11677   effect(USE labl);
11678 
11679   ins_cost(200);
11680   format %{ "J$cop,u  $labl\t# Loop end" %}
11681   size(6);
11682   ins_encode %{
11683     Label* L = $labl$$label;
11684     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11685   %}
11686   ins_pipe( pipe_jcc );
11687 %}
11688 
11689 // Jump Direct Conditional - using unsigned comparison
11690 instruct jmpConU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
11691   match(If cop cmp);
11692   effect(USE labl);
11693 
11694   ins_cost(300);
11695   format %{ "J$cop,u  $labl" %}
11696   size(6);
11697   ins_encode %{
11698     Label* L = $labl$$label;
11699     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11700   %}
11701   ins_pipe(pipe_jcc);
11702 %}
11703 
11704 instruct jmpConUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
11705   match(If cop cmp);
11706   effect(USE labl);
11707 
11708   ins_cost(200);
11709   format %{ "J$cop,u  $labl" %}
11710   size(6);
11711   ins_encode %{
11712     Label* L = $labl$$label;
11713     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11714   %}
11715   ins_pipe(pipe_jcc);
11716 %}
11717 
11718 instruct jmpConUCF2(cmpOpUCF2 cop, eFlagsRegUCF cmp, label labl) %{
11719   match(If cop cmp);
11720   effect(USE labl);
11721 
11722   ins_cost(200);
11723   format %{ $$template
11724     if ($cop$$cmpcode == Assembler::notEqual) {
11725       $$emit$$"JP,u   $labl\n\t"
11726       $$emit$$"J$cop,u   $labl"
11727     } else {
11728       $$emit$$"JP,u   done\n\t"
11729       $$emit$$"J$cop,u   $labl\n\t"
11730       $$emit$$"done:"
11731     }
11732   %}
11733   ins_encode %{
11734     Label* l = $labl$$label;
11735     if ($cop$$cmpcode == Assembler::notEqual) {
11736       __ jcc(Assembler::parity, *l, false);
11737       __ jcc(Assembler::notEqual, *l, false);
11738     } else if ($cop$$cmpcode == Assembler::equal) {
11739       Label done;
11740       __ jccb(Assembler::parity, done);
11741       __ jcc(Assembler::equal, *l, false);
11742       __ bind(done);
11743     } else {
11744        ShouldNotReachHere();
11745     }
11746   %}
11747   ins_pipe(pipe_jcc);
11748 %}
11749 
11750 // ============================================================================
11751 // The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary superklass
11752 // array for an instance of the superklass.  Set a hidden internal cache on a
11753 // hit (cache is checked with exposed code in gen_subtype_check()).  Return
11754 // NZ for a miss or zero for a hit.  The encoding ALSO sets flags.
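// Rough pseudo-code for the scan encoded below (accessor names are
// illustrative only, not the exact runtime field names):
//   for (i = 0; i < length(sub->secondary_supers); i++) {
//     if (sub->secondary_supers[i] == super) {
//       sub->secondary_super_cache = super;  // hit: update cache, return zero
//       return 0;
//     }
//   }
//   return not_zero;                         // miss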
11755 instruct partialSubtypeCheck( eDIRegP result, eSIRegP sub, eAXRegP super, eCXRegI rcx, eFlagsReg cr ) %{
11756   match(Set result (PartialSubtypeCheck sub super));
11757   effect( KILL rcx, KILL cr );
11758 
11759   ins_cost(1100);  // slightly larger than the next version
11760   format %{ "MOV    EDI,[$sub+Klass::secondary_supers]\n\t"
11761             "MOV    ECX,[EDI+ArrayKlass::length]\t# length to scan\n\t"
11762             "ADD    EDI,ArrayKlass::base_offset\t# Skip to start of data; set NZ in case count is zero\n\t"
11763             "REPNE SCASD\t# Scan *EDI++ for a match with EAX while CX-- != 0\n\t"
11764             "JNE,s  miss\t\t# Missed: EDI not-zero\n\t"
11765             "MOV    [$sub+Klass::secondary_super_cache],$super\t# Hit: update cache\n\t"
            "XOR    $result,$result\t\t# Hit: EDI zero\n\t"
11767      "miss:\t" %}
11768 
11769   opcode(0x1); // Force a XOR of EDI
11770   ins_encode( enc_PartialSubtypeCheck() );
11771   ins_pipe( pipe_slow );
11772 %}
11773 
11774 instruct partialSubtypeCheck_vs_Zero( eFlagsReg cr, eSIRegP sub, eAXRegP super, eCXRegI rcx, eDIRegP result, immP0 zero ) %{
11775   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
11776   effect( KILL rcx, KILL result );
11777 
11778   ins_cost(1000);
11779   format %{ "MOV    EDI,[$sub+Klass::secondary_supers]\n\t"
11780             "MOV    ECX,[EDI+ArrayKlass::length]\t# length to scan\n\t"
11781             "ADD    EDI,ArrayKlass::base_offset\t# Skip to start of data; set NZ in case count is zero\n\t"
11782             "REPNE SCASD\t# Scan *EDI++ for a match with EAX while CX-- != 0\n\t"
11783             "JNE,s  miss\t\t# Missed: flags NZ\n\t"
11784             "MOV    [$sub+Klass::secondary_super_cache],$super\t# Hit: update cache, flags Z\n\t"
11785      "miss:\t" %}
11786 
11787   opcode(0x0);  // No need to XOR EDI
11788   ins_encode( enc_PartialSubtypeCheck() );
11789   ins_pipe( pipe_slow );
11790 %}
11791 
11792 // ============================================================================
11793 // Branch Instructions -- short offset versions
11794 //
11795 // These instructions are used to replace jumps of a long offset (the default
11796 // match) with jumps of a shorter offset.  These instructions are all tagged
11797 // with the ins_short_branch attribute, which causes the ADLC to suppress the
11798 // match rules in general matching.  Instead, the ADLC generates a conversion
11799 // method in the MachNode which can be used to do in-place replacement of the
// long variant with the shorter variant.  The compiler determines whether the
// short form can be used with the is_short_branch_offset() predicate in the
// machine specific code section of the file.
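// For example, jmpDir above emits JMP rel32 (0xE9, 5 bytes) and jmpCon emits
// Jcc rel32 (0x0F 0x8x, 6 bytes), while the _short variants below emit
// JMP rel8 (0xEB) and Jcc rel8 (0x7x), 2 bytes each, matching their size()
// attributes.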
11803 
11804 // Jump Direct - Label defines a relative address from JMP+1
11805 instruct jmpDir_short(label labl) %{
11806   match(Goto);
11807   effect(USE labl);
11808 
11809   ins_cost(300);
11810   format %{ "JMP,s  $labl" %}
11811   size(2);
11812   ins_encode %{
11813     Label* L = $labl$$label;
11814     __ jmpb(*L);
11815   %}
11816   ins_pipe( pipe_jmp );
11817   ins_short_branch(1);
11818 %}
11819 
11820 // Jump Direct Conditional - Label defines a relative address from Jcc+1
11821 instruct jmpCon_short(cmpOp cop, eFlagsReg cr, label labl) %{
11822   match(If cop cr);
11823   effect(USE labl);
11824 
11825   ins_cost(300);
11826   format %{ "J$cop,s  $labl" %}
11827   size(2);
11828   ins_encode %{
11829     Label* L = $labl$$label;
11830     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
11831   %}
11832   ins_pipe( pipe_jcc );
11833   ins_short_branch(1);
11834 %}
11835 
11836 // Jump Direct Conditional - Label defines a relative address from Jcc+1
11837 instruct jmpLoopEnd_short(cmpOp cop, eFlagsReg cr, label labl) %{
11838   match(CountedLoopEnd cop cr);
11839   effect(USE labl);
11840 
11841   ins_cost(300);
11842   format %{ "J$cop,s  $labl\t# Loop end" %}
11843   size(2);
11844   ins_encode %{
11845     Label* L = $labl$$label;
11846     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
11847   %}
11848   ins_pipe( pipe_jcc );
11849   ins_short_branch(1);
11850 %}
11851 
11852 // Jump Direct Conditional - Label defines a relative address from Jcc+1
11853 instruct jmpLoopEndU_short(cmpOpU cop, eFlagsRegU cmp, label labl) %{
11854   match(CountedLoopEnd cop cmp);
11855   effect(USE labl);
11856 
11857   ins_cost(300);
11858   format %{ "J$cop,us $labl\t# Loop end" %}
11859   size(2);
11860   ins_encode %{
11861     Label* L = $labl$$label;
11862     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
11863   %}
11864   ins_pipe( pipe_jcc );
11865   ins_short_branch(1);
11866 %}
11867 
11868 instruct jmpLoopEndUCF_short(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
11869   match(CountedLoopEnd cop cmp);
11870   effect(USE labl);
11871 
11872   ins_cost(300);
11873   format %{ "J$cop,us $labl\t# Loop end" %}
11874   size(2);
11875   ins_encode %{
11876     Label* L = $labl$$label;
11877     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
11878   %}
11879   ins_pipe( pipe_jcc );
11880   ins_short_branch(1);
11881 %}
11882 
11883 // Jump Direct Conditional - using unsigned comparison
11884 instruct jmpConU_short(cmpOpU cop, eFlagsRegU cmp, label labl) %{
11885   match(If cop cmp);
11886   effect(USE labl);
11887 
11888   ins_cost(300);
11889   format %{ "J$cop,us $labl" %}
11890   size(2);
11891   ins_encode %{
11892     Label* L = $labl$$label;
11893     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
11894   %}
11895   ins_pipe( pipe_jcc );
11896   ins_short_branch(1);
11897 %}
11898 
11899 instruct jmpConUCF_short(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
11900   match(If cop cmp);
11901   effect(USE labl);
11902 
11903   ins_cost(300);
11904   format %{ "J$cop,us $labl" %}
11905   size(2);
11906   ins_encode %{
11907     Label* L = $labl$$label;
11908     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
11909   %}
11910   ins_pipe( pipe_jcc );
11911   ins_short_branch(1);
11912 %}
11913 
11914 instruct jmpConUCF2_short(cmpOpUCF2 cop, eFlagsRegUCF cmp, label labl) %{
11915   match(If cop cmp);
11916   effect(USE labl);
11917 
11918   ins_cost(300);
11919   format %{ $$template
11920     if ($cop$$cmpcode == Assembler::notEqual) {
11921       $$emit$$"JP,u,s   $labl\n\t"
11922       $$emit$$"J$cop,u,s   $labl"
11923     } else {
11924       $$emit$$"JP,u,s   done\n\t"
11925       $$emit$$"J$cop,u,s  $labl\n\t"
11926       $$emit$$"done:"
11927     }
11928   %}
11929   size(4);
11930   ins_encode %{
11931     Label* l = $labl$$label;
11932     if ($cop$$cmpcode == Assembler::notEqual) {
11933       __ jccb(Assembler::parity, *l);
11934       __ jccb(Assembler::notEqual, *l);
11935     } else if ($cop$$cmpcode == Assembler::equal) {
11936       Label done;
11937       __ jccb(Assembler::parity, done);
11938       __ jccb(Assembler::equal, *l);
11939       __ bind(done);
11940     } else {
11941        ShouldNotReachHere();
11942     }
11943   %}
11944   ins_pipe(pipe_jcc);
11945   ins_short_branch(1);
11946 %}
11947 
11948 // ============================================================================
11949 // Long Compare
11950 //
11951 // Currently we hold longs in 2 registers.  Comparing such values efficiently
11952 // is tricky.  The flavor of compare used depends on whether we are testing
11953 // for LT, LE, or EQ.  For a simple LT test we can check just the sign bit.
11954 // The GE test is the negated LT test.  The LE test can be had by commuting
11955 // the operands (yielding a GE test) and then negating; negate again for the
11956 // GT test.  The EQ test is done by ORcc'ing the high and low halves, and the
11957 // NE test is negated from that.
11958 
11959 // Due to a shortcoming in the ADLC, it mixes up expressions like:
11960 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)).  Note the
11961 // difference between 'Y' and '0L'.  The tree-matches for the CmpI sections
11962 // are collapsed internally in the ADLC's dfa-gen code.  The match for
11963 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
11964 // foo match ends up with the wrong leaf.  One fix is to not match both
11965 // reg-reg and reg-zero forms of long-compare.  This is unfortunate because
11966 // both forms beat the trinary form of long-compare and both are very useful
11967 // on Intel which has so few registers.
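// Sketch of the two-register idioms described above (pseudo-code; lo/hi are
// the 32-bit halves of the long):
//   lt0(x):  sign bit of x.hi is set                      // LT against zero
//   eq0(x):  (x.lo | x.hi) == 0                           // EQ/NE against zero
//   lt(a,b): CMP a.lo,b.lo; MOV t,a.hi; SBB t,b.hi        // then JL / JGE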
11968 
11969 // Manifest a CmpL result in an integer register.  Very painful.
11970 // This is the test to avoid.
11971 instruct cmpL3_reg_reg(eSIRegI dst, eRegL src1, eRegL src2, eFlagsReg flags ) %{
11972   match(Set dst (CmpL3 src1 src2));
11973   effect( KILL flags );
11974   ins_cost(1000);
11975   format %{ "XOR    $dst,$dst\n\t"
11976             "CMP    $src1.hi,$src2.hi\n\t"
11977             "JLT,s  m_one\n\t"
11978             "JGT,s  p_one\n\t"
11979             "CMP    $src1.lo,$src2.lo\n\t"
11980             "JB,s   m_one\n\t"
11981             "JEQ,s  done\n"
11982     "p_one:\tINC    $dst\n\t"
11983             "JMP,s  done\n"
11984     "m_one:\tDEC    $dst\n"
11985      "done:" %}
11986   ins_encode %{
11987     Label p_one, m_one, done;
11988     __ xorptr($dst$$Register, $dst$$Register);
11989     __ cmpl(HIGH_FROM_LOW($src1$$Register), HIGH_FROM_LOW($src2$$Register));
11990     __ jccb(Assembler::less,    m_one);
11991     __ jccb(Assembler::greater, p_one);
11992     __ cmpl($src1$$Register, $src2$$Register);
11993     __ jccb(Assembler::below,   m_one);
11994     __ jccb(Assembler::equal,   done);
11995     __ bind(p_one);
11996     __ incrementl($dst$$Register);
11997     __ jmpb(done);
11998     __ bind(m_one);
11999     __ decrementl($dst$$Register);
12000     __ bind(done);
12001   %}
12002   ins_pipe( pipe_slow );
12003 %}
12004 
12005 //======
12006 // Manifest a CmpL result in the normal flags.  Only good for LT or GE
12007 // compares.  Can be used for LE or GT compares by reversing arguments.
12008 // NOT GOOD FOR EQ/NE tests.
12009 instruct cmpL_zero_flags_LTGE( flagsReg_long_LTGE flags, eRegL src, immL0 zero ) %{
12010   match( Set flags (CmpL src zero ));
12011   ins_cost(100);
12012   format %{ "TEST   $src.hi,$src.hi" %}
12013   opcode(0x85);
12014   ins_encode( OpcP, RegReg_Hi2( src, src ) );
12015   ins_pipe( ialu_cr_reg_reg );
12016 %}
12017 
12018 // Manifest a CmpL result in the normal flags.  Only good for LT or GE
12019 // compares.  Can be used for LE or GT compares by reversing arguments.
12020 // NOT GOOD FOR EQ/NE tests.
12021 instruct cmpL_reg_flags_LTGE( flagsReg_long_LTGE flags, eRegL src1, eRegL src2, rRegI tmp ) %{
12022   match( Set flags (CmpL src1 src2 ));
12023   effect( TEMP tmp );
12024   ins_cost(300);
12025   format %{ "CMP    $src1.lo,$src2.lo\t! Long compare; set flags for low bits\n\t"
12026             "MOV    $tmp,$src1.hi\n\t"
12027             "SBB    $tmp,$src2.hi\t! Compute flags for long compare" %}
12028   ins_encode( long_cmp_flags2( src1, src2, tmp ) );
12029   ins_pipe( ialu_cr_reg_reg );
12030 %}
12031 
// Long compares reg < zero/reg OR reg >= zero/reg.
12033 // Just a wrapper for a normal branch, plus the predicate test.
12034 instruct cmpL_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, label labl) %{
12035   match(If cmp flags);
12036   effect(USE labl);
12037   predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );
12038   expand %{
12039     jmpCon(cmp,flags,labl);    // JLT or JGE...
12040   %}
12041 %}
12042 
12043 // Compare 2 longs and CMOVE longs.
12044 instruct cmovLL_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, eRegL dst, eRegL src) %{
12045   match(Set dst (CMoveL (Binary cmp flags) (Binary dst src)));
12046   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ));
12047   ins_cost(400);
12048   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12049             "CMOV$cmp $dst.hi,$src.hi" %}
12050   opcode(0x0F,0x40);
12051   ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) );
12052   ins_pipe( pipe_cmov_reg_long );
12053 %}
12054 
12055 instruct cmovLL_mem_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, eRegL dst, load_long_memory src) %{
12056   match(Set dst (CMoveL (Binary cmp flags) (Binary dst (LoadL src))));
12057   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ));
12058   ins_cost(500);
12059   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12060             "CMOV$cmp $dst.hi,$src.hi" %}
12061   opcode(0x0F,0x40);
12062   ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) );
12063   ins_pipe( pipe_cmov_reg_long );
12064 %}
12065 
12066 // Compare 2 longs and CMOVE ints.
12067 instruct cmovII_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, rRegI dst, rRegI src) %{
12068   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ));
12069   match(Set dst (CMoveI (Binary cmp flags) (Binary dst src)));
12070   ins_cost(200);
12071   format %{ "CMOV$cmp $dst,$src" %}
12072   opcode(0x0F,0x40);
12073   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12074   ins_pipe( pipe_cmov_reg );
12075 %}
12076 
12077 instruct cmovII_mem_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, rRegI dst, memory src) %{
12078   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ));
12079   match(Set dst (CMoveI (Binary cmp flags) (Binary dst (LoadI src))));
12080   ins_cost(250);
12081   format %{ "CMOV$cmp $dst,$src" %}
12082   opcode(0x0F,0x40);
12083   ins_encode( enc_cmov(cmp), RegMem( dst, src ) );
12084   ins_pipe( pipe_cmov_mem );
12085 %}
12086 
12087 // Compare 2 longs and CMOVE ints.
12088 instruct cmovPP_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, eRegP dst, eRegP src) %{
12089   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ));
12090   match(Set dst (CMoveP (Binary cmp flags) (Binary dst src)));
12091   ins_cost(200);
12092   format %{ "CMOV$cmp $dst,$src" %}
12093   opcode(0x0F,0x40);
12094   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12095   ins_pipe( pipe_cmov_reg );
12096 %}
12097 
12098 // Compare 2 longs and CMOVE doubles
12099 instruct cmovDDPR_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, regDPR dst, regDPR src) %{
  predicate( UseSSE<=1 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ) );
12101   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12102   ins_cost(200);
12103   expand %{
12104     fcmovDPR_regS(cmp,flags,dst,src);
12105   %}
12106 %}
12107 
12108 // Compare 2 longs and CMOVE doubles
12109 instruct cmovDD_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, regD dst, regD src) %{
  predicate( UseSSE>=2 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ) );
12111   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12112   ins_cost(200);
12113   expand %{
12114     fcmovD_regS(cmp,flags,dst,src);
12115   %}
12116 %}
12117 
12118 instruct cmovFFPR_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, regFPR dst, regFPR src) %{
  predicate( UseSSE==0 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ) );
12120   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12121   ins_cost(200);
12122   expand %{
12123     fcmovFPR_regS(cmp,flags,dst,src);
12124   %}
12125 %}
12126 
12127 instruct cmovFF_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, regF dst, regF src) %{
  predicate( UseSSE>=1 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ) );
12129   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12130   ins_cost(200);
12131   expand %{
12132     fcmovF_regS(cmp,flags,dst,src);
12133   %}
12134 %}
12135 
12136 //======
12137 // Manifest a CmpL result in the normal flags.  Only good for EQ/NE compares.
12138 instruct cmpL_zero_flags_EQNE( flagsReg_long_EQNE flags, eRegL src, immL0 zero, rRegI tmp ) %{
12139   match( Set flags (CmpL src zero ));
12140   effect(TEMP tmp);
12141   ins_cost(200);
12142   format %{ "MOV    $tmp,$src.lo\n\t"
12143             "OR     $tmp,$src.hi\t! Long is EQ/NE 0?" %}
12144   ins_encode( long_cmp_flags0( src, tmp ) );
12145   ins_pipe( ialu_reg_reg_long );
12146 %}
12147 
12148 // Manifest a CmpL result in the normal flags.  Only good for EQ/NE compares.
12149 instruct cmpL_reg_flags_EQNE( flagsReg_long_EQNE flags, eRegL src1, eRegL src2 ) %{
12150   match( Set flags (CmpL src1 src2 ));
12151   ins_cost(200+300);
12152   format %{ "CMP    $src1.lo,$src2.lo\t! Long compare; set flags for low bits\n\t"
12153             "JNE,s  skip\n\t"
12154             "CMP    $src1.hi,$src2.hi\n\t"
12155      "skip:\t" %}
12156   ins_encode( long_cmp_flags1( src1, src2 ) );
12157   ins_pipe( ialu_cr_reg_reg );
12158 %}
12159 
12160 // Long compare reg == zero/reg OR reg != zero/reg
12161 // Just a wrapper for a normal branch, plus the predicate test.
12162 instruct cmpL_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, label labl) %{
12163   match(If cmp flags);
12164   effect(USE labl);
12165   predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );
12166   expand %{
12167     jmpCon(cmp,flags,labl);    // JEQ or JNE...
12168   %}
12169 %}
12170 
12171 // Compare 2 longs and CMOVE longs.
12172 instruct cmovLL_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, eRegL dst, eRegL src) %{
12173   match(Set dst (CMoveL (Binary cmp flags) (Binary dst src)));
12174   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ));
12175   ins_cost(400);
12176   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12177             "CMOV$cmp $dst.hi,$src.hi" %}
12178   opcode(0x0F,0x40);
12179   ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) );
12180   ins_pipe( pipe_cmov_reg_long );
12181 %}
12182 
12183 instruct cmovLL_mem_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, eRegL dst, load_long_memory src) %{
12184   match(Set dst (CMoveL (Binary cmp flags) (Binary dst (LoadL src))));
12185   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ));
12186   ins_cost(500);
12187   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12188             "CMOV$cmp $dst.hi,$src.hi" %}
12189   opcode(0x0F,0x40);
12190   ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) );
12191   ins_pipe( pipe_cmov_reg_long );
12192 %}
12193 
12194 // Compare 2 longs and CMOVE ints.
12195 instruct cmovII_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, rRegI dst, rRegI src) %{
12196   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ));
12197   match(Set dst (CMoveI (Binary cmp flags) (Binary dst src)));
12198   ins_cost(200);
12199   format %{ "CMOV$cmp $dst,$src" %}
12200   opcode(0x0F,0x40);
12201   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12202   ins_pipe( pipe_cmov_reg );
12203 %}
12204 
12205 instruct cmovII_mem_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, rRegI dst, memory src) %{
12206   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ));
12207   match(Set dst (CMoveI (Binary cmp flags) (Binary dst (LoadI src))));
12208   ins_cost(250);
12209   format %{ "CMOV$cmp $dst,$src" %}
12210   opcode(0x0F,0x40);
12211   ins_encode( enc_cmov(cmp), RegMem( dst, src ) );
12212   ins_pipe( pipe_cmov_mem );
12213 %}
12214 
12215 // Compare 2 longs and CMOVE ints.
12216 instruct cmovPP_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, eRegP dst, eRegP src) %{
12217   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ));
12218   match(Set dst (CMoveP (Binary cmp flags) (Binary dst src)));
12219   ins_cost(200);
12220   format %{ "CMOV$cmp $dst,$src" %}
12221   opcode(0x0F,0x40);
12222   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12223   ins_pipe( pipe_cmov_reg );
12224 %}
12225 
12226 // Compare 2 longs and CMOVE doubles
12227 instruct cmovDDPR_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, regDPR dst, regDPR src) %{
  predicate( UseSSE<=1 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ) );
12229   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12230   ins_cost(200);
12231   expand %{
12232     fcmovDPR_regS(cmp,flags,dst,src);
12233   %}
12234 %}
12235 
12236 // Compare 2 longs and CMOVE doubles
12237 instruct cmovDD_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, regD dst, regD src) %{
  predicate( UseSSE>=2 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ) );
12239   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12240   ins_cost(200);
12241   expand %{
12242     fcmovD_regS(cmp,flags,dst,src);
12243   %}
12244 %}
12245 
12246 instruct cmovFFPR_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, regFPR dst, regFPR src) %{
  predicate( UseSSE==0 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ) );
12248   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12249   ins_cost(200);
12250   expand %{
12251     fcmovFPR_regS(cmp,flags,dst,src);
12252   %}
12253 %}
12254 
12255 instruct cmovFF_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, regF dst, regF src) %{
  predicate( UseSSE>=1 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ) );
12257   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12258   ins_cost(200);
12259   expand %{
12260     fcmovF_regS(cmp,flags,dst,src);
12261   %}
12262 %}
12263 
12264 //======
12265 // Manifest a CmpL result in the normal flags.  Only good for LE or GT compares.
12266 // Same as cmpL_reg_flags_LEGT except must negate src
12267 instruct cmpL_zero_flags_LEGT( flagsReg_long_LEGT flags, eRegL src, immL0 zero, rRegI tmp ) %{
12268   match( Set flags (CmpL src zero ));
12269   effect( TEMP tmp );
12270   ins_cost(300);
12271   format %{ "XOR    $tmp,$tmp\t# Long compare for -$src < 0, use commuted test\n\t"
12272             "CMP    $tmp,$src.lo\n\t"
12273             "SBB    $tmp,$src.hi\n\t" %}
12274   ins_encode( long_cmp_flags3(src, tmp) );
12275   ins_pipe( ialu_reg_reg_long );
12276 %}
12277 
12278 // Manifest a CmpL result in the normal flags.  Only good for LE or GT compares.
12279 // Same as cmpL_reg_flags_LTGE except operands swapped.  Swapping operands
12280 // requires a commuted test to get the same result.
12281 instruct cmpL_reg_flags_LEGT( flagsReg_long_LEGT flags, eRegL src1, eRegL src2, rRegI tmp ) %{
12282   match( Set flags (CmpL src1 src2 ));
12283   effect( TEMP tmp );
12284   ins_cost(300);
12285   format %{ "CMP    $src2.lo,$src1.lo\t! Long compare, swapped operands, use with commuted test\n\t"
12286             "MOV    $tmp,$src2.hi\n\t"
12287             "SBB    $tmp,$src1.hi\t! Compute flags for long compare" %}
12288   ins_encode( long_cmp_flags2( src2, src1, tmp ) );
12289   ins_pipe( ialu_cr_reg_reg );
12290 %}
12291 
// Long compares reg <= zero/reg OR reg > zero/reg.
12293 // Just a wrapper for a normal branch, plus the predicate test
12294 instruct cmpL_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, label labl) %{
12295   match(If cmp flags);
12296   effect(USE labl);
12297   predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le );
12298   ins_cost(300);
12299   expand %{
12300     jmpCon(cmp,flags,labl);    // JGT or JLE...
12301   %}
12302 %}
12303 
12304 // Compare 2 longs and CMOVE longs.
12305 instruct cmovLL_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, eRegL dst, eRegL src) %{
12306   match(Set dst (CMoveL (Binary cmp flags) (Binary dst src)));
12307   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ));
12308   ins_cost(400);
12309   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12310             "CMOV$cmp $dst.hi,$src.hi" %}
12311   opcode(0x0F,0x40);
12312   ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) );
12313   ins_pipe( pipe_cmov_reg_long );
12314 %}
12315 
12316 instruct cmovLL_mem_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, eRegL dst, load_long_memory src) %{
12317   match(Set dst (CMoveL (Binary cmp flags) (Binary dst (LoadL src))));
12318   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ));
12319   ins_cost(500);
12320   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12321             "CMOV$cmp $dst.hi,$src.hi+4" %}
12322   opcode(0x0F,0x40);
12323   ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) );
12324   ins_pipe( pipe_cmov_reg_long );
12325 %}
12326 
12327 // Compare 2 longs and CMOVE ints.
12328 instruct cmovII_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, rRegI dst, rRegI src) %{
12329   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ));
12330   match(Set dst (CMoveI (Binary cmp flags) (Binary dst src)));
12331   ins_cost(200);
12332   format %{ "CMOV$cmp $dst,$src" %}
12333   opcode(0x0F,0x40);
12334   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12335   ins_pipe( pipe_cmov_reg );
12336 %}
12337 
12338 instruct cmovII_mem_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, rRegI dst, memory src) %{
12339   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ));
12340   match(Set dst (CMoveI (Binary cmp flags) (Binary dst (LoadI src))));
12341   ins_cost(250);
12342   format %{ "CMOV$cmp $dst,$src" %}
12343   opcode(0x0F,0x40);
12344   ins_encode( enc_cmov(cmp), RegMem( dst, src ) );
12345   ins_pipe( pipe_cmov_mem );
12346 %}
12347 
12348 // Compare 2 longs and CMOVE ptrs.
12349 instruct cmovPP_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, eRegP dst, eRegP src) %{
12350   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ));
12351   match(Set dst (CMoveP (Binary cmp flags) (Binary dst src)));
12352   ins_cost(200);
12353   format %{ "CMOV$cmp $dst,$src" %}
12354   opcode(0x0F,0x40);
12355   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12356   ins_pipe( pipe_cmov_reg );
12357 %}
12358 
12359 // Compare 2 longs and CMOVE doubles
12360 instruct cmovDDPR_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regDPR dst, regDPR src) %{
  predicate( UseSSE<=1 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ) );
12362   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12363   ins_cost(200);
12364   expand %{
12365     fcmovDPR_regS(cmp,flags,dst,src);
12366   %}
12367 %}
12368 
12369 // Compare 2 longs and CMOVE doubles
12370 instruct cmovDD_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regD dst, regD src) %{
  predicate( UseSSE>=2 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ) );
12372   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12373   ins_cost(200);
12374   expand %{
12375     fcmovD_regS(cmp,flags,dst,src);
12376   %}
12377 %}
12378 
12379 instruct cmovFFPR_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regFPR dst, regFPR src) %{
  predicate( UseSSE==0 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ) );
12381   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12382   ins_cost(200);
12383   expand %{
12384     fcmovFPR_regS(cmp,flags,dst,src);
12385   %}
12386 %}
12387 
12388 
12389 instruct cmovFF_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regF dst, regF src) %{
  predicate( UseSSE>=1 && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ) );
12391   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12392   ins_cost(200);
12393   expand %{
12394     fcmovF_regS(cmp,flags,dst,src);
12395   %}
12396 %}
12397 
12398 
12399 // ============================================================================
12400 // Procedure Call/Return Instructions
12401 // Call Java Static Instruction
12402 // Note: If this code changes, the corresponding ret_addr_offset() and
12403 //       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static " %}
  opcode(0xE8); /* E8 cd */
  ins_encode( pre_call_resets,
              Java_Static_Call( meth ),
              call_epilog,
              post_call_FPU );
  ins_pipe( pipe_slow );
  ins_alignment(4);
%}

// Call Java Static Instruction (method handle version)
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaHandle(method meth, eBPRegP ebp_mh_SP_save) %{
  match(CallStaticJava);
  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth);
  // EBP is saved by all callees (for interpreter stack correction).
  // We use it here for a similar purpose, in {preserve,restore}_SP.
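  // (Rough sketch, assuming the usual MacroAssembler helpers: preserve_SP
  //  amounts to "__ movptr(rbp, rsp)" before the call and restore_SP to
  //  "__ movptr(rsp, rbp)" after it, since the method handle call may
  //  return with a modified SP.)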

  ins_cost(300);
  format %{ "CALL,static/MethodHandle " %}
  opcode(0xE8); /* E8 cd */
  ins_encode( pre_call_resets,
              preserve_SP,
              Java_Static_Call( meth ),
              restore_SP,
              call_epilog,
              post_call_FPU );
  ins_pipe( pipe_slow );
  ins_alignment(4);
%}

// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "MOV    EAX,(oop)-1\n\t"
            "CALL,dynamic" %}
  opcode(0xE8); /* E8 cd */
  ins_encode( pre_call_resets,
              Java_Dynamic_Call( meth ),
              call_epilog,
              post_call_FPU );
  ins_pipe( pipe_slow );
  ins_alignment(4);
%}

// Call Runtime Instruction
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime " %}
  opcode(0xE8); /* E8 cd */
  // Use FFREEs to clear entries in float stack
  ins_encode( pre_call_resets,
              FFree_Float_Stack_All,
              Java_To_Runtime( meth ),
              post_call_FPU );
  ins_pipe( pipe_slow );
%}

// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime " %}
  opcode(0xE8); /* E8 cd */
  ins_encode( pre_call_resets,
              FFree_Float_Stack_All,
              Java_To_Runtime( meth ),
              Verify_FPU_For_Leaf, post_call_FPU );
  ins_pipe( pipe_slow );
%}

instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
%}


// Return Instruction
// Remove the return address & jump to it.
instruct Ret() %{
  match(Return);
  format %{ "RET" %}
  opcode(0xC3);
  ins_encode(OpcP);
  ins_pipe( pipe_jmp );
%}

// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(eRegP_no_EBP jump_target, eBXRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP    $jump_target \t# EBX holds method oop" %}
  opcode(0xFF, 0x4);  /* Opcode FF /4 */
  ins_encode( OpcP, RegOpc(jump_target) );
  ins_pipe( pipe_jmp );
%}


// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
instruct tailjmpInd(eRegP_no_EBP jump_target, eAXRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(300);
  format %{ "POP    EDX\t# pop return address into dummy\n\t"
            "JMP    $jump_target " %}
  opcode(0xFF, 0x4);  /* Opcode FF /4 */
  ins_encode( enc_pop_rdx,
              OpcP, RegOpc(jump_target) );
  ins_pipe( pipe_jmp );
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is set up
// just prior to jumping to this handler.  No code emitted.
instruct CreateException( eAXRegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  size(0);
  // use the following format syntax
  format %{ "# exception oop is in EAX; no code emitted" %}
  ins_encode();
  ins_pipe( empty );
%}


// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP    rethrow_stub" %}
  ins_encode(enc_rethrow);
  ins_pipe( pipe_jmp );
%}

// inlined locking and unlocking
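// (These expand to MacroAssembler::fast_lock/fast_unlock.  Roughly, they
//  attempt biased locking when it is enabled and otherwise CAS the displaced
//  mark word into/out of the on-stack BasicLock box; the flags result of
//  FastLock/FastUnlock is what the following branch tests.)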

instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP scr, USE_KILL box);
  ins_cost(300);
  format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
  %}
  ins_pipe(pipe_slow);
%}

instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, USE_KILL box);
  ins_cost(300);
  format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_slow);
%}



// ============================================================================
// Safepoint Instruction
instruct safePoint_poll(eFlagsReg cr) %{
  match(SafePoint);
  effect(KILL cr);

  // TODO-FIXME: we currently poll at offset 0 of the safepoint polling page.
  // On SPARC that might be acceptable as we can generate the address with
  // just a sethi, saving an or.  By polling at offset 0 we can end up
  // putting additional pressure on index 0 in the D$ (data cache).  Because
  // of alignment (just like the situation at hand) the lower indices tend
  // to see more traffic.  It'd be better to change the polling address to
  // offset 0 of the last cache line in the polling page.
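  // (For reference, the poll presumably encodes as TEST [polladdr],EAX,
  //  i.e. 85 05 <disp32>: one opcode byte, one ModRM byte, and a 4-byte
  //  absolute page address, which is what the size(6) below accounts for.)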

  format %{ "TSTL   #polladdr,EAX\t! Safepoint: poll for GC" %}
  ins_cost(125);
  size(6);
  ins_encode( Safepoint_Poll() );
  ins_pipe( ialu_reg_mem );
%}


// ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
  match(Set dst (ThreadLocal));
  effect(DEF dst, KILL cr);

  format %{ "MOV    $dst, Thread::current()" %}
  ins_encode %{
    Register dstReg = as_Register($dst$$reg);
    __ get_thread(dstReg);
  %}
  ins_pipe( ialu_reg_fat );
%}



//----------PEEPHOLE RULES-----------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instruction definitions.
//
// peepmatch ( root_instr_name [preceding_instruction]* );
//
// peepconstraint %{
// (instruction_number.operand_name relational_op instruction_number.operand_name
//  [, ...] );
// // instruction numbers are zero-based using left to right order in peepmatch
//
// peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
// // provide an instruction_number.operand_name for each operand that appears
// // in the replacement instruction's match rule
//
// ---------VM FLAGS---------------------------------------------------------
//
// All peephole optimizations can be turned off using -XX:-OptoPeephole
//
// Each peephole rule is given an identifying number starting with zero and
// increasing by one in the order seen by the parser.  An individual peephole
// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
// on the command line.
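// For example (OptoPeephole and OptoPeepholeAt are develop flags, so this
// presumes a debug build):
//   -XX:-OptoPeephole        disable all peephole rules
//   -XX:OptoPeepholeAt=0     enable only peephole rule number 0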
//
// ---------CURRENT LIMITATIONS----------------------------------------------
//
// Only match adjacent instructions in same basic block
// Only equality constraints
// Only constraints between operands, not (0.dest_reg == EAX_enc)
// Only one replacement instruction
//
// ---------EXAMPLE----------------------------------------------------------
//
// // pertinent parts of existing instructions in architecture description
// instruct movI(rRegI dst, rRegI src) %{
//   match(Set dst (CopyI src));
// %}
//
// instruct incI_eReg(rRegI dst, immI1 src, eFlagsReg cr) %{
//   match(Set dst (AddI dst src));
//   effect(KILL cr);
// %}
//
// // Change (inc mov) to lea
// peephole %{
//   // increment preceded by register-register move
//   peepmatch ( incI_eReg movI );
//   // require that the destination register of the increment
//   // match the destination register of the move
//   peepconstraint ( 0.dst == 1.dst );
//   // construct a replacement instruction that sets
//   // the destination to ( move's source register + one )
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// The implementation no longer uses movX instructions since the
// machine-independent system no longer uses CopyX nodes.
//
// peephole %{
//   peepmatch ( incI_eReg movI );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// peephole %{
//   peepmatch ( decI_eReg movI );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// peephole %{
//   peepmatch ( addI_eReg_imm movI );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// peephole %{
//   peepmatch ( addP_eReg_imm movP );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
// %}

// // Change load of spilled value to only a spill
// instruct storeI(memory mem, rRegI src) %{
//   match(Set mem (StoreI mem src));
// %}
//
// instruct loadI(rRegI dst, memory mem) %{
//   match(Set dst (LoadI mem));
// %}
//
peephole %{
  peepmatch ( loadI storeI );
  peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
  peepreplace ( storeI( 1.mem 1.mem 1.src ) );
%}

//----------SMARTSPILL RULES---------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instruction definitions.