1 //
   2 // Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4 //
   5 // This code is free software; you can redistribute it and/or modify it
   6 // under the terms of the GNU General Public License version 2 only, as
   7 // published by the Free Software Foundation.
   8 //
   9 // This code is distributed in the hope that it will be useful, but WITHOUT
  10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12 // version 2 for more details (a copy is included in the LICENSE file that
  13 // accompanied this code).
  14 //
  15 // You should have received a copy of the GNU General Public License version
  16 // 2 along with this work; if not, write to the Free Software Foundation,
  17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18 //
  19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20 // or visit www.oracle.com if you need additional information or have any
  21 // questions.
  22 //
  23 //
  24 
  25 // X86 Architecture Description File
  26 
  27 //----------REGISTER DEFINITION BLOCK------------------------------------------
  28 // This information is used by the matcher and the register allocator to
  29 // describe individual registers and classes of registers within the target
  30 // architecture.
  31 
  32 register %{
  33 //----------Architecture Description Register Definitions----------------------
  34 // General Registers
  35 // "reg_def"  name ( register save type, C convention save type,
  36 //                   ideal register type, encoding );
  37 // Register Save Types:
  38 //
  39 // NS  = No-Save:       The register allocator assumes that these registers
  40 //                      can be used without saving upon entry to the method, &
  41 //                      that they do not need to be saved at call sites.
  42 //
  43 // SOC = Save-On-Call:  The register allocator assumes that these registers
  44 //                      can be used without saving upon entry to the method,
  45 //                      but that they must be saved at call sites.
  46 //
  47 // SOE = Save-On-Entry: The register allocator assumes that these registers
  48 //                      must be saved before using them upon entry to the
  49 //                      method, but they do not need to be saved at call
  50 //                      sites.
  51 //
  52 // AS  = Always-Save:   The register allocator assumes that these registers
  53 //                      must be saved before using them upon entry to the
  54 //                      method, & that they must be saved at call sites.
  55 //
  56 // Ideal Register Type is used to determine how to save & restore a
  57 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  58 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  59 //
  60 // The encoding number is the actual bit-pattern placed into the opcodes.
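//
// For example, in the reg_def for EBX below, the first argument (SOC) is the
// register save type used by the allocator, the second (SOE) is the C
// calling-convention save type (callee-saved in the C ABI), Op_RegI says the
// register is spilled with LoadI/StoreI, and 3 is the x86 hardware encoding of EBX.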
  61 
  62 // General Registers
  63 // EBX, ESI, and EDI were previously set as save-on-entry for Java code.
  64 // SOE was then turned off in Java code due to frequent use of uncommon-traps.
  65 // Now that the allocator is better, ESI and EDI are turned on as SOE registers.
  66 
  67 reg_def EBX(SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
  68 reg_def ECX(SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
  69 reg_def ESI(SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
  70 reg_def EDI(SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
  71 // now that adapter frames are gone EBP is always saved and restored by the prolog/epilog code
  72 reg_def EBP(NS, SOE, Op_RegI, 5, rbp->as_VMReg());
  73 reg_def EDX(SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
  74 reg_def EAX(SOC, SOC, Op_RegI, 0, rax->as_VMReg());
  75 reg_def ESP( NS,  NS, Op_RegI, 4, rsp->as_VMReg());
  76 
  77 // Float registers.  We treat TOS/FPR0 specially.  It is invisible to the
  78 // allocator, and only shows up in the encodings.
  79 reg_def FPR0L( SOC, SOC, Op_RegF, 0, VMRegImpl::Bad());
  80 reg_def FPR0H( SOC, SOC, Op_RegF, 0, VMRegImpl::Bad());
  81 // OK, so here's the trick: FPR1 is really st(0), except in the midst
  82 // of emission of assembly for a machnode. During emission the FPU stack
  83 // is pushed, making FPR1 == st(1) temporarily. However, at any safepoint
  84 // the stack will not have this extra element, so FPR1 == st(0) from the
  85 // oopMap viewpoint. This same weirdness with numbering forces the
  86 // instruction encoding to play games with the register encode to
  87 // correct for this 0/1 issue. See MachSpillCopyNode::implementation,
  88 // where it does flt->flt moves, for an example.
  89 //
  90 reg_def FPR1L( SOC, SOC, Op_RegF, 1, as_FloatRegister(0)->as_VMReg());
  91 reg_def FPR1H( SOC, SOC, Op_RegF, 1, as_FloatRegister(0)->as_VMReg()->next());
  92 reg_def FPR2L( SOC, SOC, Op_RegF, 2, as_FloatRegister(1)->as_VMReg());
  93 reg_def FPR2H( SOC, SOC, Op_RegF, 2, as_FloatRegister(1)->as_VMReg()->next());
  94 reg_def FPR3L( SOC, SOC, Op_RegF, 3, as_FloatRegister(2)->as_VMReg());
  95 reg_def FPR3H( SOC, SOC, Op_RegF, 3, as_FloatRegister(2)->as_VMReg()->next());
  96 reg_def FPR4L( SOC, SOC, Op_RegF, 4, as_FloatRegister(3)->as_VMReg());
  97 reg_def FPR4H( SOC, SOC, Op_RegF, 4, as_FloatRegister(3)->as_VMReg()->next());
  98 reg_def FPR5L( SOC, SOC, Op_RegF, 5, as_FloatRegister(4)->as_VMReg());
  99 reg_def FPR5H( SOC, SOC, Op_RegF, 5, as_FloatRegister(4)->as_VMReg()->next());
 100 reg_def FPR6L( SOC, SOC, Op_RegF, 6, as_FloatRegister(5)->as_VMReg());
 101 reg_def FPR6H( SOC, SOC, Op_RegF, 6, as_FloatRegister(5)->as_VMReg()->next());
 102 reg_def FPR7L( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg());
 103 reg_def FPR7H( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next());
 104 
 105 // Specify priority of register selection within phases of register
 106 // allocation.  Highest priority is first.  A useful heuristic is to
 107 // give registers a low priority when they are required by machine
 108 // instructions, like EAX and EDX.  Registers which are used as
 109 // pairs must fall on an even boundary (witness the FPR#L's in this list).
 110 // For the Intel integer registers, the equivalent Long pairs are
 111 // EDX:EAX, EBX:ECX, and EDI:EBP.
 112 alloc_class chunk0( ECX,   EBX,   EBP,   EDI,   EAX,   EDX,   ESI, ESP,
 113                     FPR0L, FPR0H, FPR1L, FPR1H, FPR2L, FPR2H,
 114                     FPR3L, FPR3H, FPR4L, FPR4H, FPR5L, FPR5H,
 115                     FPR6L, FPR6H, FPR7L, FPR7H );
 116 
 117 
 118 //----------Architecture Description Register Classes--------------------------
 119 // Several register classes are automatically defined based upon information in
 120 // this architecture description.
 121 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 122 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 123 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 124 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 125 //
 126 // Class for no registers (empty set).
 127 reg_class no_reg();
 128 
 129 // Class for all registers
 130 reg_class any_reg_with_ebp(EAX, EDX, EBP, EDI, ESI, ECX, EBX, ESP);
 131 // Class for all registers (excluding EBP)
 132 reg_class any_reg_no_ebp(EAX, EDX, EDI, ESI, ECX, EBX, ESP);
 133 // Dynamic register class that selects at runtime between register classes
 134 // any_reg_with_ebp and any_reg_no_ebp (depending on the value of the flag PreserveFramePointer).
 135 // Equivalent to: return PreserveFramePointer ? any_reg_no_ebp : any_reg_with_ebp;
 136 reg_class_dynamic any_reg(any_reg_no_ebp, any_reg_with_ebp, %{ PreserveFramePointer %});
 137 
 138 // Class for general registers
 139 reg_class int_reg_with_ebp(EAX, EDX, EBP, EDI, ESI, ECX, EBX);
 140 // Class for general registers (excluding EBP).
 141 // This register class can be used for implicit null checks on win95.
 142 // It is also safe for use by tailjumps (we don't want to allocate in ebp).
 143 // Used also if the PreserveFramePointer flag is true.
 144 reg_class int_reg_no_ebp(EAX, EDX, EDI, ESI, ECX, EBX);
 145 // Dynamic register class that selects between int_reg and int_reg_no_ebp.
 146 reg_class_dynamic int_reg(int_reg_no_ebp, int_reg_with_ebp, %{ PreserveFramePointer %});
 147 
 148 // Class of "X" registers
 149 reg_class int_x_reg(EBX, ECX, EDX, EAX);
 150 
 151 // Class of registers that can appear in an address with no offset.
 152 // EBP and ESP require an extra instruction byte for zero offset.
 153 // Used in fast-unlock
 154 reg_class p_reg(EDX, EDI, ESI, EBX);
 155 
 156 // Class for general registers excluding ECX
 157 reg_class ncx_reg_with_ebp(EAX, EDX, EBP, EDI, ESI, EBX);
 158 // Class for general registers excluding ECX (and EBP)
 159 reg_class ncx_reg_no_ebp(EAX, EDX, EDI, ESI, EBX);
 160 // Dynamic register class that selects between ncx_reg and ncx_reg_no_ebp.
 161 reg_class_dynamic ncx_reg(ncx_reg_no_ebp, ncx_reg_with_ebp, %{ PreserveFramePointer %});
 162 
 163 // Class for general registers excluding EAX
 164 reg_class nax_reg(EDX, EDI, ESI, ECX, EBX);
 165 
 166 // Class for general registers excluding EAX and EBX.
 167 reg_class nabx_reg_with_ebp(EDX, EDI, ESI, ECX, EBP);
 168 // Class for general registers excluding EAX and EBX (and EBP)
 169 reg_class nabx_reg_no_ebp(EDX, EDI, ESI, ECX);
 170 // Dynamic register class that selects between nabx_reg and nabx_reg_no_ebp.
 171 reg_class_dynamic nabx_reg(nabx_reg_no_ebp, nabx_reg_with_ebp, %{ PreserveFramePointer %});
 172 
 173 // Class of EAX (for multiply and divide operations)
 174 reg_class eax_reg(EAX);
 175 
 176 // Class of EBX (for atomic add)
 177 reg_class ebx_reg(EBX);
 178 
 179 // Class of ECX (for shift and JCXZ operations and cmpLTMask)
 180 reg_class ecx_reg(ECX);
 181 
 182 // Class of EDX (for multiply and divide operations)
 183 reg_class edx_reg(EDX);
 184 
 185 // Class of EDI (for synchronization)
 186 reg_class edi_reg(EDI);
 187 
 188 // Class of ESI (for synchronization)
 189 reg_class esi_reg(ESI);
 190 
 191 // Singleton class for stack pointer
 192 reg_class sp_reg(ESP);
 193 
 194 // Singleton class for instruction pointer
 195 // reg_class ip_reg(EIP);
 196 
 197 // Class of integer register pairs
 198 reg_class long_reg_with_ebp( EAX,EDX, ECX,EBX, EBP,EDI );
 199 // Class of integer register pairs (excluding EBP and EDI);
 200 reg_class long_reg_no_ebp( EAX,EDX, ECX,EBX );
 201 // Dynamic register class that selects between long_reg and long_reg_no_ebp.
 202 reg_class_dynamic long_reg(long_reg_no_ebp, long_reg_with_ebp, %{ PreserveFramePointer %});
 203 
 204 // Class of integer register pairs that aligns with calling convention
 205 reg_class eadx_reg( EAX,EDX );
 206 reg_class ebcx_reg( ECX,EBX );
 207 
 208 // Not AX or DX, used in divides
 209 reg_class nadx_reg_with_ebp(EBX, ECX, ESI, EDI, EBP);
 210 // Not AX or DX (and neither EBP), used in divides
 211 reg_class nadx_reg_no_ebp(EBX, ECX, ESI, EDI);
 212 // Dynamic register class that selects between nadx_reg and nadx_reg_no_ebp.
 213 reg_class_dynamic nadx_reg(nadx_reg_no_ebp, nadx_reg_with_ebp, %{ PreserveFramePointer %});
 214 
 215 // Floating point registers.  Notice FPR0 is not a choice.
 216 // FPR0 is never allocated; we use clever encodings to fake
 217 // 2-address instructions out of Intel's FP stack.
 218 reg_class fp_flt_reg( FPR1L,FPR2L,FPR3L,FPR4L,FPR5L,FPR6L,FPR7L );
 219 
 220 reg_class fp_dbl_reg( FPR1L,FPR1H, FPR2L,FPR2H, FPR3L,FPR3H,
 221                       FPR4L,FPR4H, FPR5L,FPR5H, FPR6L,FPR6H,
 222                       FPR7L,FPR7H );
 223 
 224 reg_class fp_flt_reg0( FPR1L );
 225 reg_class fp_dbl_reg0( FPR1L,FPR1H );
 226 reg_class fp_dbl_reg1( FPR2L,FPR2H );
 227 reg_class fp_dbl_notreg0( FPR2L,FPR2H, FPR3L,FPR3H, FPR4L,FPR4H,
 228                           FPR5L,FPR5H, FPR6L,FPR6H, FPR7L,FPR7H );
 229 
 230 %}
 231 
 232 
 233 //----------SOURCE BLOCK-------------------------------------------------------
 234 // This is a block of C++ code which provides values, functions, and
 235 // definitions necessary in the rest of the architecture description
 236 source_hpp %{
 237 // Must be visible to the DFA in dfa_x86_32.cpp
 238 extern bool is_operand_hi32_zero(Node* n);
 239 %}
 240 
 241 source %{
 242 #define   RELOC_IMM32    Assembler::imm_operand
 243 #define   RELOC_DISP32   Assembler::disp32_operand
 244 
 245 #define __ _masm.
 246 
 247 // How to find the high register of a Long pair, given the low register
 248 #define   HIGH_FROM_LOW(x) ((x)+2)
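// The long pairs listed in the allocation class above (EDX:EAX, EBX:ECX, EDI:EBP)
// are laid out so that the high half's encoding is always the low half's plus 2,
// e.g. HIGH_FROM_LOW(EAX enc 0) == EDX enc 2 and HIGH_FROM_LOW(ECX enc 1) == EBX enc 3.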
 249 
 250 // These masks are used to provide 128-bit aligned bitmasks to the XMM
 251 // instructions, to allow sign-masking or sign-bit flipping.  They allow
 252 // fast versions of NegF/NegD and AbsF/AbsD.
 253 
 254 // Note: 'double' and 'long long' have 32-bit alignment on x86.
 255 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
 256   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
 257   // for the 128-bit operands of SSE instructions.
 258   jlong *operand = (jlong*)(((uintptr_t)adr)&((uintptr_t)(~0xF)));
 259   // Store the value into a 128-bit operand.
 260   operand[0] = lo;
 261   operand[1] = hi;
 262   return operand;
 263 }
 264 
 265 // Buffer for the 128-bit masks used by SSE instructions.
 266 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
 267 
 268 // Static initialization during VM startup.
 269 static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
 270 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
 271 static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
 272 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
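// These pools are typically used as memory operands of the packed logical ops:
// ANDPS/ANDPD with a signmask clears the sign bit (AbsF/AbsD), while XORPS/XORPD
// with a signflip toggles it (NegF/NegD). The instruct encodings that consume
// them appear later in this file (not shown in this excerpt).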
 273 
 274 // Offset hacking within calls.
 275 static int pre_call_resets_size() {
 276   int size = 0;
 277   Compile* C = Compile::current();
 278   if (C->in_24_bit_fp_mode()) {
 279     size += 6; // fldcw
 280   }
 281   if (C->max_vector_size() > 16) {
 282     size += 3; // vzeroupper
 283   }
 284   return size;
 285 }
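// (FLDCW with a 32-bit absolute memory operand encodes as D9 /5 + disp32 = 6 bytes;
// VZEROUPPER is the fixed 3-byte sequence C5 F8 77.)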
 286 
 287 // !!!!! Special hack to get all types of calls to specify the byte offset
 288 //       from the start of the call to the point where the return address
 289 //       will point.
 290 int MachCallStaticJavaNode::ret_addr_offset() {
 291   return 5 + pre_call_resets_size();  // 5 bytes from start of call to where return address points  
 292 }
 293 
 294 int MachCallDynamicJavaNode::ret_addr_offset() {
 295   return 10 + pre_call_resets_size();  // 10 bytes from start of call to where return address points
 296 }
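// In both cases the CALL itself is the 5-byte E8 + rel32 form; the dynamic variant
// is preceded by the 5-byte MOV that sets up the inline cache, hence 10 (the same
// bytes are skipped by compute_padding() below).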
 297 
 298 static int sizeof_FFree_Float_Stack_All = -1;
 299 
 300 int MachCallRuntimeNode::ret_addr_offset() {
 301   assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
 302   return sizeof_FFree_Float_Stack_All + 5 + pre_call_resets_size();
 303 }
 304 
 305 // Indicate if the safepoint node needs the polling page as an input.
 306 // Since x86 does have absolute addressing, it doesn't.
 307 bool SafePointNode::needs_polling_address_input() {
 308   return false;
 309 }
 310 
 311 //
 312 // Compute padding required for nodes which need alignment
 313 //
 314 
 315 // The address of the call instruction needs to be 4-byte aligned to
 316 // ensure that it does not span a cache line so that it can be patched.
 317 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
 318   current_offset += pre_call_resets_size();  // skip fldcw, if any
 319   current_offset += 1;      // skip call opcode byte
 320   return round_to(current_offset, alignment_required()) - current_offset;
 321 }
 322 
 323 // The address of the call instruction needs to be 4-byte aligned to
 324 // ensure that it does not span a cache line so that it can be patched.
 325 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
 326   current_offset += pre_call_resets_size();  // skip fldcw, if any
 327   current_offset += 5;      // skip MOV instruction
 328   current_offset += 1;      // skip call opcode byte
 329   return round_to(current_offset, alignment_required()) - current_offset;
 330 }
 331 
 332 // EMIT_RM()
 333 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
 334   unsigned char c = (unsigned char)((f1 << 6) | (f2 << 3) | f3);
 335   cbuf.insts()->emit_int8(c);
 336 }
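// For example, emit_rm(cbuf, 0x3, 0x00, ESP_enc) yields 11 000 100 = 0xC4, the
// mod/rm byte of the 'ADD ESP, #imm' emitted in the epilog below.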
 337 
 338 // EMIT_CC()
 339 void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
 340   unsigned char c = (unsigned char)( f1 | f2 );
 341   cbuf.insts()->emit_int8(c);
 342 }
 343 
 344 // EMIT_OPCODE()
 345 void emit_opcode(CodeBuffer &cbuf, int code) {
 346   cbuf.insts()->emit_int8((unsigned char) code);
 347 }
 348 
 349 // EMIT_OPCODE() w/ relocation information
 350 void emit_opcode(CodeBuffer &cbuf, int code, relocInfo::relocType reloc, int offset = 0) {
 351   cbuf.relocate(cbuf.insts_mark() + offset, reloc);
 352   emit_opcode(cbuf, code);
 353 }
 354 
 355 // EMIT_D8()
 356 void emit_d8(CodeBuffer &cbuf, int d8) {
 357   cbuf.insts()->emit_int8((unsigned char) d8);
 358 }
 359 
 360 // EMIT_D16()
 361 void emit_d16(CodeBuffer &cbuf, int d16) {
 362   cbuf.insts()->emit_int16(d16);
 363 }
 364 
 365 // EMIT_D32()
 366 void emit_d32(CodeBuffer &cbuf, int d32) {
 367   cbuf.insts()->emit_int32(d32);
 368 }
 369 
 370 // emit 32 bit value and construct relocation entry from relocInfo::relocType
 371 void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
 372         int format) {
 373   cbuf.relocate(cbuf.insts_mark(), reloc, format);
 374   cbuf.insts()->emit_int32(d32);
 375 }
 376 
 377 // emit 32 bit value and construct relocation entry from RelocationHolder
 378 void emit_d32_reloc(CodeBuffer &cbuf, int d32, RelocationHolder const& rspec,
 379         int format) {
 380 #ifdef ASSERT
 381   if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
 382     assert(cast_to_oop(d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
 383   }
 384 #endif
 385   cbuf.relocate(cbuf.insts_mark(), rspec, format);
 386   cbuf.insts()->emit_int32(d32);
 387 }
 388 
 389 // Access stack slot for load or store
 390 void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp) {
 391   emit_opcode( cbuf, opcode );               // (e.g., FILD   [ESP+src])
 392   if( -128 <= disp && disp <= 127 ) {
 393     emit_rm( cbuf, 0x01, rm_field, ESP_enc );  // R/M byte
 394     emit_rm( cbuf, 0x00, ESP_enc, ESP_enc);    // SIB byte
 395     emit_d8 (cbuf, disp);     // Displacement  // R/M byte
 396   } else {
 397     emit_rm( cbuf, 0x02, rm_field, ESP_enc );  // R/M byte
 398     emit_rm( cbuf, 0x00, ESP_enc, ESP_enc);    // SIB byte
 399     emit_d32(cbuf, disp);     // Displacement  // R/M byte
 400   }
 401 }
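// For example, store_to_stackslot(cbuf, 0xDB, 0x0, 8) emits DB 44 24 08,
// i.e. FILD DWORD PTR [ESP+8] (opcode, mod/rm, SIB, disp8).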
 402 
 403 // Emit the mod/rm, SIB and displacement bytes of a register-memory operand (rRegI ereg, memory mem).
 404 void encode_RegMem( CodeBuffer &cbuf, int reg_encoding, int base, int index, int scale, int displace, relocInfo::relocType disp_reloc ) {
 405   // There is no index & no scale, use form without SIB byte
 406   if ((index == 0x4) &&
 407       (scale == 0) && (base != ESP_enc)) {
 408     // If no displacement, mode is 0x0; unless base is [EBP]
 409     if ( (displace == 0) && (base != EBP_enc) ) {
 410       emit_rm(cbuf, 0x0, reg_encoding, base);
 411     }
 412     else {                    // If 8-bit displacement, mode 0x1
 413       if ((displace >= -128) && (displace <= 127)
 414           && (disp_reloc == relocInfo::none) ) {
 415         emit_rm(cbuf, 0x1, reg_encoding, base);
 416         emit_d8(cbuf, displace);
 417       }
 418       else {                  // If 32-bit displacement
 419         if (base == -1) { // Special flag for absolute address
 420           emit_rm(cbuf, 0x0, reg_encoding, 0x5);
 421           // (manual lies; no SIB needed here)
 422           if ( disp_reloc != relocInfo::none ) {
 423             emit_d32_reloc(cbuf, displace, disp_reloc, 1);
 424           } else {
 425             emit_d32      (cbuf, displace);
 426           }
 427         }
 428         else {                // Normal base + offset
 429           emit_rm(cbuf, 0x2, reg_encoding, base);
 430           if ( disp_reloc != relocInfo::none ) {
 431             emit_d32_reloc(cbuf, displace, disp_reloc, 1);
 432           } else {
 433             emit_d32      (cbuf, displace);
 434           }
 435         }
 436       }
 437     }
 438   }
 439   else {                      // Else, encode with the SIB byte
 440     // If no displacement, mode is 0x0; unless base is [EBP]
 441     if (displace == 0 && (base != EBP_enc)) {  // If no displacement
 442       emit_rm(cbuf, 0x0, reg_encoding, 0x4);
 443       emit_rm(cbuf, scale, index, base);
 444     }
 445     else {                    // If 8-bit displacement, mode 0x1
 446       if ((displace >= -128) && (displace <= 127)
 447           && (disp_reloc == relocInfo::none) ) {
 448         emit_rm(cbuf, 0x1, reg_encoding, 0x4);
 449         emit_rm(cbuf, scale, index, base);
 450         emit_d8(cbuf, displace);
 451       }
 452       else {                  // If 32-bit displacement
 453         if (base == 0x04 ) {
 454           emit_rm(cbuf, 0x2, reg_encoding, 0x4);
 455           emit_rm(cbuf, scale, index, 0x04);
 456         } else {
 457           emit_rm(cbuf, 0x2, reg_encoding, 0x4);
 458           emit_rm(cbuf, scale, index, base);
 459         }
 460         if ( disp_reloc != relocInfo::none ) {
 461           emit_d32_reloc(cbuf, displace, disp_reloc, 1);
 462         } else {
 463           emit_d32      (cbuf, displace);
 464         }
 465       }
 466     }
 467   }
 468 }
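// For example, with reg_encoding = 0 (EAX), base = 1 (ECX), index = 0x4 (no index),
// scale = 0, displace = 8 and no relocation, this takes the no-SIB path and emits
// mod/rm 0x41 followed by the disp8 byte 0x08, i.e. the [ECX+8] addressing form.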
 469 
 470 
 471 void encode_Copy( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
 472   if( dst_encoding == src_encoding ) {
 473     // reg-reg copy, use an empty encoding
 474   } else {
 475     emit_opcode( cbuf, 0x8B );
 476     emit_rm(cbuf, 0x3, dst_encoding, src_encoding );
 477   }
 478 }
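// 0x8B /r with mod = 0x3 is the register-to-register form of MOV dst,src;
// a copy of a register to itself is elided entirely.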
 479 
 480 void emit_cmpfp_fixup(MacroAssembler& _masm) {
 481   Label exit;
 482   __ jccb(Assembler::noParity, exit);
 483   __ pushf();
 484   //
 485   // comiss/ucomiss instructions set ZF,PF,CF flags and
 486   // zero OF,AF,SF for NaN values.
 487   // Fixup flags by zeroing ZF,PF so that compare of NaN
 488   // values returns 'less than' result (CF is set).
 489   // Leave the rest of flags unchanged.
 490   //
 491   //    7 6 5 4 3 2 1 0
 492   //   |S|Z|r|A|r|P|r|C|  (r - reserved bit)
 493   //    0 0 1 0 1 0 1 1   (0x2B)
 494   //
 495   __ andl(Address(rsp, 0), 0xffffff2b);
 496   __ popf();
 497   __ bind(exit);
 498 }
 499 
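// Materialize the three-way floating-point compare result in 'dst': -1 for
// unordered (NaN) or less-than, 0 for equal, +1 for greater-than, based on the
// CF/ZF/PF flags left by a preceding (u)comiss/(u)comisd.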
 500 void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
 501   Label done;
 502   __ movl(dst, -1);
 503   __ jcc(Assembler::parity, done);
 504   __ jcc(Assembler::below, done);
 505   __ setb(Assembler::notEqual, dst);
 506   __ movzbl(dst, dst);
 507   __ bind(done);
 508 }
 509 
 510 
 511 //=============================================================================
 512 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 513 
 514 int Compile::ConstantTable::calculate_table_base_offset() const {
 515   return 0;  // absolute addressing, no offset
 516 }
 517 
 518 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
 519 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
 520   ShouldNotReachHere();
 521 }
 522 
 523 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
 524   // Empty encoding
 525 }
 526 
 527 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
 528   return 0;
 529 }
 530 
 531 #ifndef PRODUCT
 532 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 533   st->print("# MachConstantBaseNode (empty encoding)");
 534 }
 535 #endif
 536 
 537 
 538 //=============================================================================
 539 #ifndef PRODUCT
 540 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 541   Compile* C = ra_->C;
 542 
 543   int framesize = C->frame_size_in_bytes();
 544   int bangsize = C->bang_size_in_bytes();
 545   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 546   // Remove wordSize for return addr which is already pushed.
 547   framesize -= wordSize;
 548 
 549   if (C->need_stack_bang(bangsize)) {
 550     framesize -= wordSize;
 551     st->print("# stack bang (%d bytes)", bangsize);
 552     st->print("\n\t");
 553     st->print("PUSH   EBP\t# Save EBP");
 554     if (PreserveFramePointer) {
 555       st->print("\n\t");
 556       st->print("MOV    EBP, ESP\t# Save the caller's SP into EBP");
 557     }
 558     if (framesize) {
 559       st->print("\n\t");
 560       st->print("SUB    ESP, #%d\t# Create frame",framesize);
 561     }
 562   } else {
 563     st->print("SUB    ESP, #%d\t# Create frame",framesize);
 564     st->print("\n\t");
 565     framesize -= wordSize;
 566     st->print("MOV    [ESP + #%d], EBP\t# Save EBP",framesize);
 567     if (PreserveFramePointer) {
 568       st->print("\n\t");
 569       st->print("MOV    EBP, [ESP + #%d]\t# Save the caller's SP into EBP", (framesize + wordSize));
 570     }
 571   }
 572 
 573   if (VerifyStackAtCalls) {
 574     st->print("\n\t");
 575     framesize -= wordSize;
 576     st->print("MOV    [ESP + #%d], 0xBADB100D\t# Majik cookie for stack depth check",framesize);
 577   }
 578 
 579   if( C->in_24_bit_fp_mode() ) {
 580     st->print("\n\t");
 581     st->print("FLDCW  \t# load 24 bit fpu control word");
 582   }
 583   if (UseSSE >= 2 && VerifyFPU) {
 584     st->print("\n\t");
 585     st->print("# verify FPU stack (must be clean on entry)");
 586   }
 587 
 588 #ifdef ASSERT
 589   if (VerifyStackAtCalls) {
 590     st->print("\n\t");
 591     st->print("# stack alignment check");
 592   }
 593 #endif
 594   st->cr();
 595 }
 596 #endif
 597 
 598 
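// The actual prolog bytes are emitted by MacroAssembler::verified_entry(); it is
// expected to produce the sequence printed by format() above (optional stack bang,
// PUSH EBP / frame setup, and the 24-bit FLDCW when in_24_bit_fp_mode()).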
 599 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 600   Compile* C = ra_->C;
 601   MacroAssembler _masm(&cbuf);
 602 
 603   int framesize = C->frame_size_in_bytes();
 604   int bangsize = C->bang_size_in_bytes();
 605 
 606   __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, C->in_24_bit_fp_mode());
 607 
 608   C->set_frame_complete(cbuf.insts_size());
 609 
 610   if (C->has_mach_constant_base_node()) {
 611     // NOTE: We set the table base offset here because users might be
 612     // emitted before MachConstantBaseNode.
 613     Compile::ConstantTable& constant_table = C->constant_table();
 614     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
 615   }
 616 }
 617 
 618 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
 619   return MachNode::size(ra_); // too many variables; just compute it the hard way
 620 }
 621 
 622 int MachPrologNode::reloc() const {
 623   return 0; // a large enough number
 624 }
 625 
 626 //=============================================================================
 627 #ifndef PRODUCT
 628 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
 629   Compile *C = ra_->C;
 630   int framesize = C->frame_size_in_bytes();
 631   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 632   // Remove two words for return addr and rbp.
 633   framesize -= 2*wordSize;
 634 
 635   if (C->max_vector_size() > 16) {
 636     st->print("VZEROUPPER");
 637     st->cr(); st->print("\t");
 638   }
 639   if (C->in_24_bit_fp_mode()) {
 640     st->print("FLDCW  standard control word");
 641     st->cr(); st->print("\t");
 642   }
 643   if (framesize) {
 644     st->print("ADD    ESP,%d\t# Destroy frame",framesize);
 645     st->cr(); st->print("\t");
 646   }
 647   st->print_cr("POPL   EBP"); st->print("\t");
 648   if (do_polling() && C->is_method_compilation()) {
 649     st->print("TEST   PollPage,EAX\t! Poll Safepoint");
 650     st->cr(); st->print("\t");
 651   }
 652 }
 653 #endif
 654 
 655 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 656   Compile *C = ra_->C;
 657 
 658   if (C->max_vector_size() > 16) {
 659     // Clear upper bits of YMM registers when current compiled code uses
 660     // wide vectors to avoid AVX <-> SSE transition penalty during call.
 661     MacroAssembler masm(&cbuf);
 662     masm.vzeroupper();
 663   }
 664   // If method set FPU control word, restore to standard control word
 665   if (C->in_24_bit_fp_mode()) {
 666     MacroAssembler masm(&cbuf);
 667     masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
 668   }
 669 
 670   int framesize = C->frame_size_in_bytes();
 671   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 672   // Remove two words for return addr and rbp.
 673   framesize -= 2*wordSize;
 674 
 675   // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
 676 
 677   if (framesize >= 128) {
 678     emit_opcode(cbuf, 0x81); // add  SP, #framesize
 679     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
 680     emit_d32(cbuf, framesize);
 681   } else if (framesize) {
 682     emit_opcode(cbuf, 0x83); // add  SP, #framesize
 683     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
 684     emit_d8(cbuf, framesize);
 685   }
 686 
 687   emit_opcode(cbuf, 0x58 | EBP_enc);
 688 
 689   if (do_polling() && C->is_method_compilation()) {
 690     cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0);
 691     emit_opcode(cbuf,0x85);
 692     emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX
 693     emit_d32(cbuf, (intptr_t)os::get_polling_page());
 694   }
 695 }
 696 
 697 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 698   Compile *C = ra_->C;
 699   // If method set FPU control word, restore to standard control word
 700   int size = C->in_24_bit_fp_mode() ? 6 : 0;
 701   if (C->max_vector_size() > 16) size += 3; // vzeroupper
 702   if (do_polling() && C->is_method_compilation()) size += 6;
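  // Byte accounting mirrors emit() above: ADD ESP,imm32 is 6 bytes (81 /0 id),
  // ADD ESP,imm8 is 3 (83 /0 ib), POP EBP is 1 (58+r), and the TEST EAX,[poll_page]
  // safepoint poll is 6 (85 /r with a 32-bit absolute address).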
 703 
 704   int framesize = C->frame_size_in_bytes();
 705   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 706   // Remove two words for return addr and rbp.
 707   framesize -= 2*wordSize;
 708 
 709   size++; // popl rbp
 710 
 711   if (framesize >= 128) {
 712     size += 6;
 713   } else {
 714     size += framesize ? 3 : 0;
 715   }
 716   return size;
 717 }
 718 
 719 int MachEpilogNode::reloc() const {
 720   return 0; // a large enough number
 721 }
 722 
 723 const Pipeline * MachEpilogNode::pipeline() const {
 724   return MachNode::pipeline_class();
 725 }
 726 
 727 int MachEpilogNode::safepoint_offset() const { return 0; }
 728 
 729 //=============================================================================
 730 
 731 enum RC { rc_bad, rc_int, rc_float, rc_xmm, rc_stack };
 732 static enum RC rc_class( OptoReg::Name reg ) {
 733 
 734   if( !OptoReg::is_valid(reg)  ) return rc_bad;
 735   if (OptoReg::is_stack(reg)) return rc_stack;
 736 
 737   VMReg r = OptoReg::as_VMReg(reg);
 738   if (r->is_Register()) return rc_int;
 739   if (r->is_FloatRegister()) {
 740     assert(UseSSE < 2, "shouldn't be used in SSE2+ mode");
 741     return rc_float;
 742   }
 743   assert(r->is_XMMRegister(), "must be");
 744   return rc_xmm;
 745 }
 746 
 747 static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset, int reg,
 748                         int opcode, const char *op_str, int size, outputStream* st ) {
 749   if( cbuf ) {
 750     emit_opcode  (*cbuf, opcode );
 751     encode_RegMem(*cbuf, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, relocInfo::none);
 752 #ifndef PRODUCT
 753   } else if( !do_size ) {
 754     if( size != 0 ) st->print("\n\t");
 755     if( opcode == 0x8B || opcode == 0x89 ) { // MOV
 756       if( is_load ) st->print("%s   %s,[ESP + #%d]",op_str,Matcher::regName[reg],offset);
 757       else          st->print("%s   [ESP + #%d],%s",op_str,offset,Matcher::regName[reg]);
 758     } else { // FLD, FST, PUSH, POP
 759       st->print("%s [ESP + #%d]",op_str,offset);
 760     }
 761 #endif
 762   }
 763   int offset_size = (offset == 0) ? 0 : ((offset <= 127) ? 1 : 4);
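  // opcode + mod/rm + SIB = 3 bytes, plus 0/1/4 displacement bytes
  // ([ESP+0] still needs the SIB byte, so the base cost is 3 either way).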
 764   return size+3+offset_size;
 765 }
 766 
 767 // Helper for XMM registers.  Extra opcode bits, limited syntax.
 768 static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
 769                          int offset, int reg_lo, int reg_hi, int size, outputStream* st ) {
 770   if (cbuf) {
 771     MacroAssembler _masm(cbuf);
 772     if (reg_lo+1 == reg_hi) { // double move?
 773       if (is_load) {
 774         __ movdbl(as_XMMRegister(Matcher::_regEncode[reg_lo]), Address(rsp, offset));
 775       } else {
 776         __ movdbl(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[reg_lo]));
 777       }
 778     } else {
 779       if (is_load) {
 780         __ movflt(as_XMMRegister(Matcher::_regEncode[reg_lo]), Address(rsp, offset));
 781       } else {
 782         __ movflt(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[reg_lo]));
 783       }
 784     }
 785 #ifndef PRODUCT
 786   } else if (!do_size) {
 787     if (size != 0) st->print("\n\t");
 788     if (reg_lo+1 == reg_hi) { // double move?
 789       if (is_load) st->print("%s %s,[ESP + #%d]",
 790                               UseXmmLoadAndClearUpper ? "MOVSD " : "MOVLPD",
 791                               Matcher::regName[reg_lo], offset);
 792       else         st->print("MOVSD  [ESP + #%d],%s",
 793                               offset, Matcher::regName[reg_lo]);
 794     } else {
 795       if (is_load) st->print("MOVSS  %s,[ESP + #%d]",
 796                               Matcher::regName[reg_lo], offset);
 797       else         st->print("MOVSS  [ESP + #%d],%s",
 798                               offset, Matcher::regName[reg_lo]);
 799     }
 800 #endif
 801   }
 802   int offset_size = (offset == 0) ? 0 : ((offset <= 127) ? 1 : 4);
 803   // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
 804   return size+5+offset_size;
 805 }
 806 
 807 
 808 static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 809                             int src_hi, int dst_hi, int size, outputStream* st ) {
 810   if (cbuf) {
 811     MacroAssembler _masm(cbuf);
 812     if (src_lo+1 == src_hi && dst_lo+1 == dst_hi) { // double move?
 813       __ movdbl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 814                 as_XMMRegister(Matcher::_regEncode[src_lo]));
 815     } else {
 816       __ movflt(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 817                 as_XMMRegister(Matcher::_regEncode[src_lo]));
 818     }
 819 #ifndef PRODUCT
 820   } else if (!do_size) {
 821     if (size != 0) st->print("\n\t");
 822     if (UseXmmRegToRegMoveAll) {//Use movaps,movapd to move between xmm registers
 823       if (src_lo+1 == src_hi && dst_lo+1 == dst_hi) { // double move?
 824         st->print("MOVAPD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 825       } else {
 826         st->print("MOVAPS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 827       }
 828     } else {
 829       if( src_lo+1 == src_hi && dst_lo+1 == dst_hi ) { // double move?
 830         st->print("MOVSD  %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 831       } else {
 832         st->print("MOVSS  %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 833       }
 834     }
 835 #endif
 836   }
 837   // VEX_2bytes prefix is used if UseAVX > 0, and it takes the same 2 bytes as SIMD prefix.
 838   // Only MOVAPS SSE prefix uses 1 byte.
 839   int sz = 4;
 840   if (!(src_lo+1 == src_hi && dst_lo+1 == dst_hi) &&
 841       UseXmmRegToRegMoveAll && (UseAVX == 0)) sz = 3;
 842   return size + sz;
 843 }
 844 
 845 static int impl_movgpr2x_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 846                             int src_hi, int dst_hi, int size, outputStream* st ) {
 847   // 32-bit
 848   if (cbuf) {
 849     MacroAssembler _masm(cbuf);
 850     __ movdl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 851              as_Register(Matcher::_regEncode[src_lo]));
 852 #ifndef PRODUCT
 853   } else if (!do_size) {
 854     st->print("movdl   %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
 855 #endif
 856   }
 857   return 4;
 858 }
 859 
 860 
 861 static int impl_movx2gpr_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 862                                  int src_hi, int dst_hi, int size, outputStream* st ) {
 863   // 32-bit
 864   if (cbuf) {
 865     MacroAssembler _masm(cbuf);
 866     __ movdl(as_Register(Matcher::_regEncode[dst_lo]),
 867              as_XMMRegister(Matcher::_regEncode[src_lo]));
 868 #ifndef PRODUCT
 869   } else if (!do_size) {
 870     st->print("movdl   %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
 871 #endif
 872   }
 873   return 4;
 874 }
 875 
 876 static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size, outputStream* st ) {
 877   if( cbuf ) {
 878     emit_opcode(*cbuf, 0x8B );
 879     emit_rm    (*cbuf, 0x3, Matcher::_regEncode[dst], Matcher::_regEncode[src] );
 880 #ifndef PRODUCT
 881   } else if( !do_size ) {
 882     if( size != 0 ) st->print("\n\t");
 883     st->print("MOV    %s,%s",Matcher::regName[dst],Matcher::regName[src]);
 884 #endif
 885   }
 886   return size+2;
 887 }
 888 
 889 static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi,
 890                                  int offset, int size, outputStream* st ) {
 891   if( src_lo != FPR1L_num ) {      // Move value to top of FP stack, if not already there
 892     if( cbuf ) {
 893       emit_opcode( *cbuf, 0xD9 );  // FLD (i.e., push it)
 894       emit_d8( *cbuf, 0xC0-1+Matcher::_regEncode[src_lo] );
 895 #ifndef PRODUCT
 896     } else if( !do_size ) {
 897       if( size != 0 ) st->print("\n\t");
 898       st->print("FLD    %s",Matcher::regName[src_lo]);
 899 #endif
 900     }
 901     size += 2;
 902   }
 903 
 904   int st_op = (src_lo != FPR1L_num) ? EBX_num /*store & pop*/ : EDX_num /*store no pop*/;
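  // EBX_num and EDX_num are used purely for their encodings (3 and 2): with the
  // D9/DD opcodes below, /3 selects FSTP (store and pop) and /2 selects FST
  // (store, no pop), so the 'register' slot of impl_helper doubles as the /digit.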
 905   const char *op_str;
 906   int op;
 907   if( src_lo+1 == src_hi && dst_lo+1 == dst_hi ) { // double store?
 908     op_str = (src_lo != FPR1L_num) ? "FSTP_D" : "FST_D ";
 909     op = 0xDD;
 910   } else {                   // 32-bit store
 911     op_str = (src_lo != FPR1L_num) ? "FSTP_S" : "FST_S ";
 912     op = 0xD9;
 913     assert( !OptoReg::is_valid(src_hi) && !OptoReg::is_valid(dst_hi), "no non-adjacent float-stores" );
 914   }
 915 
 916   return impl_helper(cbuf,do_size,false,offset,st_op,op,op_str,size, st);
 917 }
 918 
 919 // Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
 920 static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 921                           int src_hi, int dst_hi, uint ireg, outputStream* st);
 922 
 923 static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
 924                             int stack_offset, int reg, uint ireg, outputStream* st);
 925 
 926 static int vec_stack_to_stack_helper(CodeBuffer *cbuf, bool do_size, int src_offset,
 927                                      int dst_offset, uint ireg, outputStream* st) {
 928   int calc_size = 0;
 929   int src_offset_size = (src_offset == 0) ? 0 : ((src_offset < 0x80) ? 1 : 4);
 930   int dst_offset_size = (dst_offset == 0) ? 0 : ((dst_offset < 0x80) ? 1 : 4);
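  // Size accounting: PUSH/POP [ESP+disp] are 3 bytes (opcode + mod/rm + SIB) plus
  // the displacement; (v)movdqu [ESP+disp] forms are 5 bytes plus displacement, and
  // the xmm0 save/restore at [ESP-16]/[ESP-32] is a fixed 6 bytes each.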
 931   switch (ireg) {
 932   case Op_VecS:
 933     calc_size = 3+src_offset_size + 3+dst_offset_size;
 934     break;
 935   case Op_VecD:
 936     calc_size = 3+src_offset_size + 3+dst_offset_size;
 937     src_offset += 4;
 938     dst_offset += 4;
 939     src_offset_size = (src_offset == 0) ? 0 : ((src_offset < 0x80) ? 1 : 4);
 940     dst_offset_size = (dst_offset == 0) ? 0 : ((dst_offset < 0x80) ? 1 : 4);
 941     calc_size += 3+src_offset_size + 3+dst_offset_size;
 942     break;
 943   case Op_VecX:
 944     calc_size = 6 + 6 + 5+src_offset_size + 5+dst_offset_size;
 945     break;
 946   case Op_VecY:
 947     calc_size = 6 + 6 + 5+src_offset_size + 5+dst_offset_size;
 948     break;
 949   default:
 950     ShouldNotReachHere();
 951   }
 952   if (cbuf) {
 953     MacroAssembler _masm(cbuf);
 954     int offset = __ offset();
 955     switch (ireg) {
 956     case Op_VecS:
 957       __ pushl(Address(rsp, src_offset));
 958       __ popl (Address(rsp, dst_offset));
 959       break;
 960     case Op_VecD:
 961       __ pushl(Address(rsp, src_offset));
 962       __ popl (Address(rsp, dst_offset));
 963       __ pushl(Address(rsp, src_offset+4));
 964       __ popl (Address(rsp, dst_offset+4));
 965       break;
 966     case Op_VecX:
 967       __ movdqu(Address(rsp, -16), xmm0);
 968       __ movdqu(xmm0, Address(rsp, src_offset));
 969       __ movdqu(Address(rsp, dst_offset), xmm0);
 970       __ movdqu(xmm0, Address(rsp, -16));
 971       break;
 972     case Op_VecY:
 973       __ vmovdqu(Address(rsp, -32), xmm0);
 974       __ vmovdqu(xmm0, Address(rsp, src_offset));
 975       __ vmovdqu(Address(rsp, dst_offset), xmm0);
 976       __ vmovdqu(xmm0, Address(rsp, -32));
 977       break;
 978     default:
 979       ShouldNotReachHere();
 980     }
 981     int size = __ offset() - offset;
 982     assert(size == calc_size, "incorrect size calculation");
 983     return size;
 984 #ifndef PRODUCT
 985   } else if (!do_size) {
 986     switch (ireg) {
 987     case Op_VecS:
 988       st->print("pushl   [rsp + #%d]\t# 32-bit mem-mem spill\n\t"
 989                 "popl    [rsp + #%d]",
 990                 src_offset, dst_offset);
 991       break;
 992     case Op_VecD:
 993       st->print("pushl   [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
 994                 "popq    [rsp + #%d]\n\t"
 995                 "pushl   [rsp + #%d]\n\t"
 996                 "popq    [rsp + #%d]",
 997                 src_offset, dst_offset, src_offset+4, dst_offset+4);
 998       break;
 999      case Op_VecX:
1000       st->print("movdqu  [rsp - #16], xmm0\t# 128-bit mem-mem spill\n\t"
1001                 "movdqu  xmm0, [rsp + #%d]\n\t"
1002                 "movdqu  [rsp + #%d], xmm0\n\t"
1003                 "movdqu  xmm0, [rsp - #16]",
1004                 src_offset, dst_offset);
1005       break;
1006     case Op_VecY:
1007       st->print("vmovdqu [rsp - #32], xmm0\t# 256-bit mem-mem spill\n\t"
1008                 "vmovdqu xmm0, [rsp + #%d]\n\t"
1009                 "vmovdqu [rsp + #%d], xmm0\n\t"
1010                 "vmovdqu xmm0, [rsp - #32]",
1011                 src_offset, dst_offset);
1012       break;
1013     default:
1014       ShouldNotReachHere();
1015     }
1016 #endif
1017   }
1018   return calc_size;
1019 }
1020 
1021 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
1022   // Get registers to move
1023   OptoReg::Name src_second = ra_->get_reg_second(in(1));
1024   OptoReg::Name src_first = ra_->get_reg_first(in(1));
1025   OptoReg::Name dst_second = ra_->get_reg_second(this );
1026   OptoReg::Name dst_first = ra_->get_reg_first(this );
1027 
1028   enum RC src_second_rc = rc_class(src_second);
1029   enum RC src_first_rc = rc_class(src_first);
1030   enum RC dst_second_rc = rc_class(dst_second);
1031   enum RC dst_first_rc = rc_class(dst_first);
1032 
1033   assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
1034 
1035   // Generate spill code!
1036   int size = 0;
1037 
1038   if( src_first == dst_first && src_second == dst_second )
1039     return size;            // Self copy, no move
1040 
1041   if (bottom_type()->isa_vect() != NULL) {
1042     uint ireg = ideal_reg();
1043     assert((src_first_rc != rc_int && dst_first_rc != rc_int), "sanity");
1044     assert((src_first_rc != rc_float && dst_first_rc != rc_float), "sanity");
1045     assert((ireg == Op_VecS || ireg == Op_VecD || ireg == Op_VecX || ireg == Op_VecY), "sanity");
1046     if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1047       // mem -> mem
1048       int src_offset = ra_->reg2offset(src_first);
1049       int dst_offset = ra_->reg2offset(dst_first);
1050       return vec_stack_to_stack_helper(cbuf, do_size, src_offset, dst_offset, ireg, st);
1051     } else if (src_first_rc == rc_xmm && dst_first_rc == rc_xmm ) {
1052       return vec_mov_helper(cbuf, do_size, src_first, dst_first, src_second, dst_second, ireg, st);
1053     } else if (src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
1054       int stack_offset = ra_->reg2offset(dst_first);
1055       return vec_spill_helper(cbuf, do_size, false, stack_offset, src_first, ireg, st);
1056     } else if (src_first_rc == rc_stack && dst_first_rc == rc_xmm ) {
1057       int stack_offset = ra_->reg2offset(src_first);
1058       return vec_spill_helper(cbuf, do_size, true,  stack_offset, dst_first, ireg, st);
1059     } else {
1060       ShouldNotReachHere();
1061     }
1062   }
1063 
1064   // --------------------------------------
1065   // Check for mem-mem move.  push/pop to move.
1066   if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1067     if( src_second == dst_first ) { // overlapping stack copy ranges
1068       assert( src_second_rc == rc_stack && dst_second_rc == rc_stack, "we only expect a stk-stk copy here" );
1069       size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH  ",size, st);
1070       size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP   ",size, st);
1071       src_second_rc = dst_second_rc = rc_bad;  // flag as already moved the second bits
1072     }
1073     // move low bits
1074     size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),ESI_num,0xFF,"PUSH  ",size, st);
1075     size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),EAX_num,0x8F,"POP   ",size, st);
1076     if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) { // mov second bits
1077       size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH  ",size, st);
1078       size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP   ",size, st);
1079     }
1080     return size;
1081   }
1082 
1083   // --------------------------------------
1084   // Check for integer reg-reg copy
1085   if( src_first_rc == rc_int && dst_first_rc == rc_int )
1086     size = impl_mov_helper(cbuf,do_size,src_first,dst_first,size, st);
1087 
1088   // Check for integer store
1089   if( src_first_rc == rc_int && dst_first_rc == rc_stack )
1090     size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first,0x89,"MOV ",size, st);
1091 
1092   // Check for integer load
1093   if( dst_first_rc == rc_int && src_first_rc == rc_stack )
1094     size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size, st);
1095 
1096   // Check for integer reg-xmm reg copy
1097   if( src_first_rc == rc_int && dst_first_rc == rc_xmm ) {
1098     assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
1099             "no 64 bit integer-float reg moves" );
1100     return impl_movgpr2x_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
1101   }
1102   // --------------------------------------
1103   // Check for float reg-reg copy
1104   if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
1105     assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad) ||
1106             (src_first+1 == src_second && dst_first+1 == dst_second), "no non-adjacent float-moves" );
1107     if( cbuf ) {
1108 
1109       // Note the mucking with the register encode to compensate for the 0/1
1110       // indexing issue mentioned in a comment in the reg_def sections
1111       // for FPR registers many lines above here.
1112 
1113       if( src_first != FPR1L_num ) {
1114         emit_opcode  (*cbuf, 0xD9 );           // FLD    ST(i)
1115         emit_d8      (*cbuf, 0xC0+Matcher::_regEncode[src_first]-1 );
1116         emit_opcode  (*cbuf, 0xDD );           // FSTP   ST(i)
1117         emit_d8      (*cbuf, 0xD8+Matcher::_regEncode[dst_first] );
1118      } else {
1119         emit_opcode  (*cbuf, 0xDD );           // FST    ST(i)
1120         emit_d8      (*cbuf, 0xD0+Matcher::_regEncode[dst_first]-1 );
1121      }
1122 #ifndef PRODUCT
1123     } else if( !do_size ) {
1124       if( size != 0 ) st->print("\n\t");
1125       if( src_first != FPR1L_num ) st->print("FLD    %s\n\tFSTP   %s",Matcher::regName[src_first],Matcher::regName[dst_first]);
1126       else                      st->print(             "FST    %s",                            Matcher::regName[dst_first]);
1127 #endif
1128     }
1129     return size + ((src_first != FPR1L_num) ? 2+2 : 2);
1130   }
1131 
1132   // Check for float store
1133   if( src_first_rc == rc_float && dst_first_rc == rc_stack ) {
1134     return impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,ra_->reg2offset(dst_first),size, st);
1135   }
1136 
1137   // Check for float load
1138   if( dst_first_rc == rc_float && src_first_rc == rc_stack ) {
1139     int offset = ra_->reg2offset(src_first);
1140     const char *op_str;
1141     int op;
1142     if( src_first+1 == src_second && dst_first+1 == dst_second ) { // double load?
1143       op_str = "FLD_D";
1144       op = 0xDD;
1145     } else {                   // 32-bit load
1146       op_str = "FLD_S";
1147       op = 0xD9;
1148       assert( src_second_rc == rc_bad && dst_second_rc == rc_bad, "no non-adjacent float-loads" );
1149     }
1150     if( cbuf ) {
1151       emit_opcode  (*cbuf, op );
1152       encode_RegMem(*cbuf, 0x0, ESP_enc, 0x4, 0, offset, relocInfo::none);
1153       emit_opcode  (*cbuf, 0xDD );           // FSTP   ST(i)
1154       emit_d8      (*cbuf, 0xD8+Matcher::_regEncode[dst_first] );
1155 #ifndef PRODUCT
1156     } else if( !do_size ) {
1157       if( size != 0 ) st->print("\n\t");
1158       st->print("%s  ST,[ESP + #%d]\n\tFSTP   %s",op_str, offset,Matcher::regName[dst_first]);
1159 #endif
1160     }
1161     int offset_size = (offset == 0) ? 0 : ((offset <= 127) ? 1 : 4);
1162     return size + 3+offset_size+2;
1163   }
1164 
1165   // Check for xmm reg-reg copy
1166   if( src_first_rc == rc_xmm && dst_first_rc == rc_xmm ) {
1167     assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad) ||
1168             (src_first+1 == src_second && dst_first+1 == dst_second),
1169             "no non-adjacent float-moves" );
1170     return impl_movx_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
1171   }
1172 
1173   // Check for xmm reg-integer reg copy
1174   if( src_first_rc == rc_xmm && dst_first_rc == rc_int ) {
1175     assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
1176             "no 64 bit float-integer reg moves" );
1177     return impl_movx2gpr_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
1178   }
1179 
1180   // Check for xmm store
1181   if( src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
1182     return impl_x_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first, src_second, size, st);
1183   }
1184 
1185   // Check for float xmm load
1186   if( dst_first_rc == rc_xmm && src_first_rc == rc_stack ) {
1187     return impl_x_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first, dst_second, size, st);
1188   }
1189 
1190   // Copy from float reg to xmm reg
1191   if( dst_first_rc == rc_xmm && src_first_rc == rc_float ) {
1192     // copy to the top of stack from floating point reg
1193     // and use LEA to preserve flags
1194     if( cbuf ) {
1195       emit_opcode(*cbuf,0x8D);  // LEA  ESP,[ESP-8]
1196       emit_rm(*cbuf, 0x1, ESP_enc, 0x04);
1197       emit_rm(*cbuf, 0x0, 0x04, ESP_enc);
1198       emit_d8(*cbuf,0xF8);
1199 #ifndef PRODUCT
1200     } else if( !do_size ) {
1201       if( size != 0 ) st->print("\n\t");
1202       st->print("LEA    ESP,[ESP-8]");
1203 #endif
1204     }
1205     size += 4;
1206 
1207     size = impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,0,size, st);
1208 
1209     // Copy from the temp memory to the xmm reg.
1210     size = impl_x_helper(cbuf,do_size,true ,0,dst_first, dst_second, size, st);
1211 
1212     if( cbuf ) {
1213       emit_opcode(*cbuf,0x8D);  // LEA  ESP,[ESP+8]
1214       emit_rm(*cbuf, 0x1, ESP_enc, 0x04);
1215       emit_rm(*cbuf, 0x0, 0x04, ESP_enc);
1216       emit_d8(*cbuf,0x08);
1217 #ifndef PRODUCT
1218     } else if( !do_size ) {
1219       if( size != 0 ) st->print("\n\t");
1220       st->print("LEA    ESP,[ESP+8]");
1221 #endif
1222     }
1223     size += 4;
1224     return size;
1225   }
1226 
1227   assert( size > 0, "missed a case" );
1228 
1229   // --------------------------------------------------------------------
1230   // Check for second bits still needing moving.
1231   if( src_second == dst_second )
1232     return size;               // Self copy; no move
1233   assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );
1234 
1235   // Check for second word int-int move
1236   if( src_second_rc == rc_int && dst_second_rc == rc_int )
1237     return impl_mov_helper(cbuf,do_size,src_second,dst_second,size, st);
1238 
1239   // Check for second word integer store
1240   if( src_second_rc == rc_int && dst_second_rc == rc_stack )
1241     return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st);
1242 
1243   // Check for second word integer load
1244   if( dst_second_rc == rc_int && src_second_rc == rc_stack )
1245     return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st);
1246 
1247 
1248   Unimplemented();
1249   return 0; // Mute compiler
1250 }
1251 
1252 #ifndef PRODUCT
1253 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const {
1254   implementation( NULL, ra_, false, st );
1255 }
1256 #endif
1257 
1258 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1259   implementation( &cbuf, ra_, false, NULL );
1260 }
1261 
1262 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
1263   return implementation( NULL, ra_, true, NULL );
1264 }
1265 
1266 
1267 //=============================================================================
1268 #ifndef PRODUCT
1269 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1270   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1271   int reg = ra_->get_reg_first(this);
1272   st->print("LEA    %s,[ESP + #%d]",Matcher::regName[reg],offset);
1273 }
1274 #endif
1275 
1276 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1277   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1278   int reg = ra_->get_encode(this);
1279   if( offset >= 128 ) {
1280     emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
1281     emit_rm(cbuf, 0x2, reg, 0x04);
1282     emit_rm(cbuf, 0x0, 0x04, ESP_enc);
1283     emit_d32(cbuf, offset);
1284   }
1285   else {
1286     emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
1287     emit_rm(cbuf, 0x1, reg, 0x04);
1288     emit_rm(cbuf, 0x0, 0x04, ESP_enc);
1289     emit_d8(cbuf, offset);
1290   }
1291 }
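// LEA reg,[ESP+disp8] is 8D + mod/rm + SIB + disp8 = 4 bytes; the disp32 form is 7,
// matching BoxLockNode::size() below.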
1292 
1293 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
1294   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1295   if( offset >= 128 ) {
1296     return 7;
1297   }
1298   else {
1299     return 4;
1300   }
1301 }
1302 
1303 //=============================================================================
1304 #ifndef PRODUCT
1305 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1306   st->print_cr(  "CMP    EAX,[ECX+4]\t# Inline cache check");
1307   st->print_cr("\tJNE    SharedRuntime::handle_ic_miss_stub");
1308   st->print_cr("\tNOP");
1309   st->print_cr("\tNOP");
1310   if( !OptoBreakpoint )
1311     st->print_cr("\tNOP");
1312 }
1313 #endif
1314 
1315 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1316   MacroAssembler masm(&cbuf);
1317 #ifdef ASSERT
1318   uint insts_size = cbuf.insts_size();
1319 #endif
1320   masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
1321   masm.jump_cc(Assembler::notEqual,
1322                RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  /* WARNING: these NOPs are critical so that the verified entry point is properly
     aligned for patching by NativeJump::patch_verified_entry() */
1325   int nops_cnt = 2;
1326   if( !OptoBreakpoint ) // Leave space for int3
1327      nops_cnt += 1;
1328   masm.nop(nops_cnt);
1329 
1330   assert(cbuf.insts_size() - insts_size == size(ra_), "checking code size of inline cache node");
1331 }
1332 
1333 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
1334   return OptoBreakpoint ? 11 : 12;
1335 }
1336 
1337 
1338 //=============================================================================
1339 
1340 int Matcher::regnum_to_fpu_offset(int regnum) {
1341   return regnum - 32; // The FP registers are in the second chunk
1342 }
1343 
// This is UltraSparc specific; here returning true just means we have a fast l2f conversion
1345 const bool Matcher::convL2FSupported(void) {
1346   return true;
1347 }
1348 
1349 // Is this branch offset short enough that a short branch can be used?
1350 //
1351 // NOTE: If the platform does not provide any short branch variants, then
1352 //       this method should return false for offset 0.
1353 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to the address of the branch.
  // On x86 a branch displacement is calculated relative to the address
  // of the next instruction.
1357   offset -= br_size;
1358 
  // The short version of jmpConUCF2 contains multiple branches,
  // making the reach slightly shorter.
1361   if (rule == jmpConUCF2_rule)
1362     return (-126 <= offset && offset <= 125);
1363   return (-128 <= offset && offset <= 127);
1364 }
1365 
1366 const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
1368   return false;
1369 }
1370 
1371 // The ecx parameter to rep stos for the ClearArray node is in dwords.
1372 const bool Matcher::init_array_count_is_in_bytes = false;
1373 
1374 // Threshold size for cleararray.
1375 const int Matcher::init_array_short_size = 8 * BytesPerLong;
1376 
1377 // Needs 2 CMOV's for longs.
1378 const int Matcher::long_cmove_cost() { return 1; }
1379 
1380 // No CMOVF/CMOVD with SSE/SSE2
1381 const int Matcher::float_cmove_cost() { return (UseSSE>=1) ? ConditionalMoveLimit : 0; }
1382 
1383 // Does the CPU require late expand (see block.cpp for description of late expand)?
1384 const bool Matcher::require_postalloc_expand = false;
1385 
1386 // Should the Matcher clone shifts on addressing modes, expecting them to
1387 // be subsumed into complex addressing expressions or compute them into
1388 // registers?  True for Intel but false for most RISCs
1389 const bool Matcher::clone_shift_expressions = true;
1390 
1391 // Do we need to mask the count passed to shift instructions or does
1392 // the cpu only look at the lower 5/6 bits anyway?
1393 const bool Matcher::need_masked_shift_count = false;
1394 
1395 bool Matcher::narrow_oop_use_complex_address() {
1396   ShouldNotCallThis();
1397   return true;
1398 }
1399 
1400 bool Matcher::narrow_klass_use_complex_address() {
1401   ShouldNotCallThis();
1402   return true;
1403 }
1404 
1405 
1406 // Is it better to copy float constants, or load them directly from memory?
1407 // Intel can load a float constant from a direct address, requiring no
1408 // extra registers.  Most RISCs will have to materialize an address into a
1409 // register first, so they would do better to copy the constant from stack.
1410 const bool Matcher::rematerialize_float_constants = true;
1411 
1412 // If CPU can load and store mis-aligned doubles directly then no fixup is
1413 // needed.  Else we split the double into 2 integer pieces and move it
1414 // piece-by-piece.  Only happens when passing doubles into C code as the
1415 // Java calling convention forces doubles to be aligned.
1416 const bool Matcher::misaligned_doubles_ok = true;
1417 
1418 
1419 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
1420   // Get the memory operand from the node
1421   uint numopnds = node->num_opnds();        // Virtual call for number of operands
1422   uint skipped  = node->oper_input_base();  // Sum of leaves skipped so far
1423   assert( idx >= skipped, "idx too low in pd_implicit_null_fixup" );
1424   uint opcnt     = 1;                 // First operand
1425   uint num_edges = node->_opnds[1]->num_edges(); // leaves for first operand
1426   while( idx >= skipped+num_edges ) {
1427     skipped += num_edges;
1428     opcnt++;                          // Bump operand count
1429     assert( opcnt < numopnds, "Accessing non-existent operand" );
1430     num_edges = node->_opnds[opcnt]->num_edges(); // leaves for next operand
1431   }
1432 
1433   MachOper *memory = node->_opnds[opcnt];
1434   MachOper *new_memory = NULL;
1435   switch (memory->opcode()) {
1436   case DIRECT:
1437   case INDOFFSET32X:
1438     // No transformation necessary.
1439     return;
1440   case INDIRECT:
1441     new_memory = new indirect_win95_safeOper( );
1442     break;
1443   case INDOFFSET8:
1444     new_memory = new indOffset8_win95_safeOper(memory->disp(NULL, NULL, 0));
1445     break;
1446   case INDOFFSET32:
1447     new_memory = new indOffset32_win95_safeOper(memory->disp(NULL, NULL, 0));
1448     break;
1449   case INDINDEXOFFSET:
1450     new_memory = new indIndexOffset_win95_safeOper(memory->disp(NULL, NULL, 0));
1451     break;
1452   case INDINDEXSCALE:
1453     new_memory = new indIndexScale_win95_safeOper(memory->scale());
1454     break;
1455   case INDINDEXSCALEOFFSET:
1456     new_memory = new indIndexScaleOffset_win95_safeOper(memory->scale(), memory->disp(NULL, NULL, 0));
1457     break;
1458   case LOAD_LONG_INDIRECT:
1459   case LOAD_LONG_INDOFFSET32:
    // Does not use EBP as the address register; uses { EDX, EBX, EDI, ESI }
1461     return;
1462   default:
1463     assert(false, "unexpected memory operand in pd_implicit_null_fixup()");
1464     return;
1465   }
1466   node->_opnds[opcnt] = new_memory;
1467 }
1468 
1469 // Advertise here if the CPU requires explicit rounding operations
1470 // to implement the UseStrictFP mode.
1471 const bool Matcher::strict_fp_requires_explicit_rounding = true;
1472 
// Are floats converted to doubles when stored to the stack during deoptimization?
// On x32 they are stored with conversion only when the FPU is used for floats.
1475 bool Matcher::float_in_double() { return (UseSSE == 0); }
1476 
1477 // Do ints take an entire long register or just half?
1478 const bool Matcher::int_in_long = false;
1479 
1480 // Return whether or not this register is ever used as an argument.  This
1481 // function is used on startup to build the trampoline stubs in generateOptoStub.
1482 // Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
1484 bool Matcher::can_be_java_arg( int reg ) {
1485   if(  reg == ECX_num   || reg == EDX_num   ) return true;
1486   if( (reg == XMM0_num  || reg == XMM1_num ) && UseSSE>=1 ) return true;
1487   if( (reg == XMM0b_num || reg == XMM1b_num) && UseSSE>=2 ) return true;
1488   return false;
1489 }
1490 
1491 bool Matcher::is_spillable_arg( int reg ) {
1492   return can_be_java_arg(reg);
1493 }
1494 
1495 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  // Use the hardware integer DIV instruction when
  // it is faster than code which uses multiply.
  // Only when the constant divisor fits into 32 bits
  // (min_jint is excluded to get only correct
  // positive 32-bit values from negatives).
1501   return VM_Version::has_fast_idiv() &&
1502          (divisor == (int)divisor && divisor != min_jint);
1503 }
1504 
1505 // Register for DIVI projection of divmodI
1506 RegMask Matcher::divI_proj_mask() {
1507   return EAX_REG_mask();
1508 }
1509 
1510 // Register for MODI projection of divmodI
1511 RegMask Matcher::modI_proj_mask() {
1512   return EDX_REG_mask();
1513 }
1514 
1515 // Register for DIVL projection of divmodL
1516 RegMask Matcher::divL_proj_mask() {
1517   ShouldNotReachHere();
1518   return RegMask();
1519 }
1520 
1521 // Register for MODL projection of divmodL
1522 RegMask Matcher::modL_proj_mask() {
1523   ShouldNotReachHere();
1524   return RegMask();
1525 }
1526 
1527 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
1528   return NO_REG_mask();
1529 }
1530 
// Returns true if the high 32 bits of the value are known to be zero.
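// For example, (AndL x 0xFFFFFFFF) and a non-negative 32-bit ConL both qualify.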
1532 bool is_operand_hi32_zero(Node* n) {
1533   int opc = n->Opcode();
1534   if (opc == Op_AndL) {
1535     Node* o2 = n->in(2);
1536     if (o2->is_Con() && (o2->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
1537       return true;
1538     }
1539   }
1540   if (opc == Op_ConL && (n->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
1541     return true;
1542   }
1543   return false;
1544 }
1545 
1546 %}
1547 
1548 //----------ENCODING BLOCK-----------------------------------------------------
1549 // This block specifies the encoding classes used by the compiler to output
1550 // byte streams.  Encoding classes generate functions which are called by
1551 // Machine Instruction Nodes in order to generate the bit encoding of the
1552 // instruction.  Operands specify their base encoding interface with the
// interface keyword.  Four interfaces are currently supported:
// REG_INTER, CONST_INTER, MEMORY_INTER, and COND_INTER.  REG_INTER causes an
1555 // operand to generate a function which returns its register number when
1556 // queried.   CONST_INTER causes an operand to generate a function which
1557 // returns the value of the constant when queried.  MEMORY_INTER causes an
1558 // operand to generate four functions which return the Base Register, the
1559 // Index Register, the Scale Value, and the Offset Value of the operand when
1560 // queried.  COND_INTER causes an operand to generate six functions which
// return the encoding code (i.e. the encoding bits for the instruction)
1562 // associated with each basic boolean condition for a conditional instruction.
1563 // Instructions specify two basic values for encoding.  They use the
1564 // ins_encode keyword to specify their encoding class (which must be one of
1565 // the class names specified in the encoding block), and they use the
1566 // opcode keyword to specify, in order, their primary, secondary, and
1567 // tertiary opcode.  Only the opcode sections which a particular instruction
1568 // needs for encoding need to be specified.
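//
// As an illustrative sketch only (this exact rule is not defined in this block),
// an integer-add instruct would tie the two together roughly as follows:
//
//   instruct addI_eReg_sketch(rRegI dst, rRegI src, eFlagsReg cr) %{
//     match(Set dst (AddI dst src));
//     effect(KILL cr);
//     opcode(0x03);                         // primary opcode: ADD r32,r/m32
//     ins_encode( OpcP, RegReg(dst,src) );  // encoding classes from the block below
//     ins_pipe( ialu_reg_reg );
//   %}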
1569 encode %{
1570   // Build emit functions for each basic byte or larger field in the intel
1571   // encoding scheme (opcode, rm, sib, immediate), and call them from C++
1572   // code in the enc_class source block.  Emit functions will live in the
  // main source block for now.  In the future, we can generalize this by
  // adding a syntax that specifies the sizes of fields in an order, so
  // that the adlc can build the emit functions automagically.
1576 
1577   // Emit primary opcode
1578   enc_class OpcP %{
1579     emit_opcode(cbuf, $primary);
1580   %}
1581 
1582   // Emit secondary opcode
1583   enc_class OpcS %{
1584     emit_opcode(cbuf, $secondary);
1585   %}
1586 
1587   // Emit opcode directly
1588   enc_class Opcode(immI d8) %{
1589     emit_opcode(cbuf, $d8$$constant);
1590   %}
1591 
1592   enc_class SizePrefix %{
1593     emit_opcode(cbuf,0x66);
1594   %}
1595 
1596   enc_class RegReg (rRegI dst, rRegI src) %{    // RegReg(Many)
1597     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
1598   %}
1599 
1600   enc_class OpcRegReg (immI opcode, rRegI dst, rRegI src) %{    // OpcRegReg(Many)
1601     emit_opcode(cbuf,$opcode$$constant);
1602     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
1603   %}
1604 
1605   enc_class mov_r32_imm0( rRegI dst ) %{
1606     emit_opcode( cbuf, 0xB8 + $dst$$reg ); // 0xB8+ rd   -- MOV r32  ,imm32
1607     emit_d32   ( cbuf, 0x0  );             //                         imm32==0x0
1608   %}
1609 
1610   enc_class cdq_enc %{
1611     // Full implementation of Java idiv and irem; checks for
1612     // special case as described in JVM spec., p.243 & p.271.
1613     //
1614     //         normal case                           special case
1615     //
    // input : rax: dividend                          min_int
    //         reg: divisor                          -1
    //
    // output: rax: quotient  (= rax idiv reg)        min_int
    //         rdx: remainder (= rax irem reg)        0
1621     //
    //  Code sequence:
1623     //
1624     //  81 F8 00 00 00 80    cmp         rax,80000000h
1625     //  0F 85 0B 00 00 00    jne         normal_case
1626     //  33 D2                xor         rdx,edx
1627     //  83 F9 FF             cmp         rcx,0FFh
1628     //  0F 84 03 00 00 00    je          done
1629     //                  normal_case:
1630     //  99                   cdq
1631     //  F7 F9                idiv        rax,ecx
1632     //                  done:
1633     //
1634     emit_opcode(cbuf,0x81); emit_d8(cbuf,0xF8);
1635     emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00);
1636     emit_opcode(cbuf,0x00); emit_d8(cbuf,0x80);                     // cmp rax,80000000h
1637     emit_opcode(cbuf,0x0F); emit_d8(cbuf,0x85);
1638     emit_opcode(cbuf,0x0B); emit_d8(cbuf,0x00);
1639     emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00);                     // jne normal_case
1640     emit_opcode(cbuf,0x33); emit_d8(cbuf,0xD2);                     // xor rdx,edx
1641     emit_opcode(cbuf,0x83); emit_d8(cbuf,0xF9); emit_d8(cbuf,0xFF); // cmp rcx,0FFh
1642     emit_opcode(cbuf,0x0F); emit_d8(cbuf,0x84);
1643     emit_opcode(cbuf,0x03); emit_d8(cbuf,0x00);
1644     emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00);                     // je done
1645     // normal_case:
1646     emit_opcode(cbuf,0x99);                                         // cdq
1647     // idiv (note: must be emitted by the user of this rule)
    // done:
1649   %}
1650 
1651   // Dense encoding for older common ops
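  // (e.g. one-byte forms such as PUSH r32 = 0x50+rd or MOV r32,imm32 = 0xB8+rd)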
1652   enc_class Opc_plus(immI opcode, rRegI reg) %{
1653     emit_opcode(cbuf, $opcode$$constant + $reg$$reg);
1654   %}
1655 
1656 
  // Opcode enc_class for 8/32-bit immediate instructions with sign-extension
1658   enc_class OpcSE (immI imm) %{ // Emit primary opcode and set sign-extend bit
1659     // Check for 8-bit immediate, and set sign extend bit in opcode
1660     if (($imm$$constant >= -128) && ($imm$$constant <= 127)) {
1661       emit_opcode(cbuf, $primary | 0x02);
1662     }
1663     else {                          // If 32-bit immediate
1664       emit_opcode(cbuf, $primary);
1665     }
1666   %}
1667 
1668   enc_class OpcSErm (rRegI dst, immI imm) %{    // OpcSEr/m
1669     // Emit primary opcode and set sign-extend bit
1670     // Check for 8-bit immediate, and set sign extend bit in opcode
1671     if (($imm$$constant >= -128) && ($imm$$constant <= 127)) {
      emit_opcode(cbuf, $primary | 0x02);
    }
1673     else {                          // If 32-bit immediate
1674       emit_opcode(cbuf, $primary);
1675     }
1676     // Emit r/m byte with secondary opcode, after primary opcode.
1677     emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
1678   %}
1679 
1680   enc_class Con8or32 (immI imm) %{    // Con8or32(storeImmI), 8 or 32 bits
1681     // Check for 8-bit immediate, and set sign extend bit in opcode
1682     if (($imm$$constant >= -128) && ($imm$$constant <= 127)) {
1683       $$$emit8$imm$$constant;
1684     }
1685     else {                          // If 32-bit immediate
1686       // Output immediate
1687       $$$emit32$imm$$constant;
1688     }
1689   %}
1690 
1691   enc_class Long_OpcSErm_Lo(eRegL dst, immL imm) %{
1692     // Emit primary opcode and set sign-extend bit
1693     // Check for 8-bit immediate, and set sign extend bit in opcode
1694     int con = (int)$imm$$constant; // Throw away top bits
1695     emit_opcode(cbuf, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
1696     // Emit r/m byte with secondary opcode, after primary opcode.
1697     emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
1698     if ((con >= -128) && (con <= 127)) emit_d8 (cbuf,con);
1699     else                               emit_d32(cbuf,con);
1700   %}
1701 
1702   enc_class Long_OpcSErm_Hi(eRegL dst, immL imm) %{
1703     // Emit primary opcode and set sign-extend bit
1704     // Check for 8-bit immediate, and set sign extend bit in opcode
1705     int con = (int)($imm$$constant >> 32); // Throw away bottom bits
1706     emit_opcode(cbuf, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
1707     // Emit r/m byte with tertiary opcode, after primary opcode.
1708     emit_rm(cbuf, 0x3, $tertiary, HIGH_FROM_LOW($dst$$reg));
1709     if ((con >= -128) && (con <= 127)) emit_d8 (cbuf,con);
1710     else                               emit_d32(cbuf,con);
1711   %}
1712 
1713   enc_class OpcSReg (rRegI dst) %{    // BSWAP
1714     emit_cc(cbuf, $secondary, $dst$$reg );
1715   %}
1716 
1717   enc_class bswap_long_bytes(eRegL dst) %{ // BSWAP
1718     int destlo = $dst$$reg;
1719     int desthi = HIGH_FROM_LOW(destlo);
1720     // bswap lo
1721     emit_opcode(cbuf, 0x0F);
1722     emit_cc(cbuf, 0xC8, destlo);
1723     // bswap hi
1724     emit_opcode(cbuf, 0x0F);
1725     emit_cc(cbuf, 0xC8, desthi);
1726     // xchg lo and hi
1727     emit_opcode(cbuf, 0x87);
1728     emit_rm(cbuf, 0x3, destlo, desthi);
1729   %}
1730 
1731   enc_class RegOpc (rRegI div) %{    // IDIV, IMOD, JMP indirect, ...
1732     emit_rm(cbuf, 0x3, $secondary, $div$$reg );
1733   %}
1734 
1735   enc_class enc_cmov(cmpOp cop ) %{ // CMOV
1736     $$$emit8$primary;
1737     emit_cc(cbuf, $secondary, $cop$$cmpcode);
1738   %}
1739 
1740   enc_class enc_cmov_dpr(cmpOp cop, regDPR src ) %{ // CMOV
1741     int op = 0xDA00 + $cop$$cmpcode + ($src$$reg-1);
1742     emit_d8(cbuf, op >> 8 );
1743     emit_d8(cbuf, op & 255);
1744   %}
1745 
1746   // emulate a CMOV with a conditional branch around a MOV
1747   enc_class enc_cmov_branch( cmpOp cop, immI brOffs ) %{ // CMOV
1748     // Invert sense of branch from sense of CMOV
1749     emit_cc( cbuf, 0x70, ($cop$$cmpcode^1) );
1750     emit_d8( cbuf, $brOffs$$constant );
1751   %}
1752 
1753   enc_class enc_PartialSubtypeCheck( ) %{
1754     Register Redi = as_Register(EDI_enc); // result register
1755     Register Reax = as_Register(EAX_enc); // super class
1756     Register Recx = as_Register(ECX_enc); // killed
1757     Register Resi = as_Register(ESI_enc); // sub class
1758     Label miss;
1759 
1760     MacroAssembler _masm(&cbuf);
1761     __ check_klass_subtype_slow_path(Resi, Reax, Recx, Redi,
1762                                      NULL, &miss,
1763                                      /*set_cond_codes:*/ true);
1764     if ($primary) {
1765       __ xorptr(Redi, Redi);
1766     }
1767     __ bind(miss);
1768   %}
1769 
1770   enc_class FFree_Float_Stack_All %{    // Free_Float_Stack_All
1771     MacroAssembler masm(&cbuf);
1772     int start = masm.offset();
1773     if (UseSSE >= 2) {
1774       if (VerifyFPU) {
1775         masm.verify_FPU(0, "must be empty in SSE2+ mode");
1776       }
1777     } else {
1778       // External c_calling_convention expects the FPU stack to be 'clean'.
1779       // Compiled code leaves it dirty.  Do cleanup now.
1780       masm.empty_FPU_stack();
1781     }
1782     if (sizeof_FFree_Float_Stack_All == -1) {
1783       sizeof_FFree_Float_Stack_All = masm.offset() - start;
1784     } else {
1785       assert(masm.offset() - start == sizeof_FFree_Float_Stack_All, "wrong size");
1786     }
1787   %}
1788 
1789   enc_class Verify_FPU_For_Leaf %{
1790     if( VerifyFPU ) {
1791       MacroAssembler masm(&cbuf);
1792       masm.verify_FPU( -3, "Returning from Runtime Leaf call");
1793     }
1794   %}
1795 
1796   enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime, Java_To_Runtime_Leaf
1797     // This is the instruction starting address for relocation info.
1798     cbuf.set_insts_mark();
1799     $$$emit8$primary;
1800     // CALL directly to the runtime
1801     emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1802                 runtime_call_Relocation::spec(), RELOC_IMM32 );
1803 
1804     if (UseSSE >= 2) {
1805       MacroAssembler _masm(&cbuf);
1806       BasicType rt = tf()->return_type();
1807 
1808       if ((rt == T_FLOAT || rt == T_DOUBLE) && !return_value_is_used()) {
1809         // A C runtime call where the return value is unused.  In SSE2+
1810         // mode the result needs to be removed from the FPU stack.  It's
1811         // likely that this function call could be removed by the
1812         // optimizer if the C function is a pure function.
1813         __ ffree(0);
1814       } else if (rt == T_FLOAT) {
1815         __ lea(rsp, Address(rsp, -4));
1816         __ fstp_s(Address(rsp, 0));
1817         __ movflt(xmm0, Address(rsp, 0));
1818         __ lea(rsp, Address(rsp,  4));
1819       } else if (rt == T_DOUBLE) {
1820         __ lea(rsp, Address(rsp, -8));
1821         __ fstp_d(Address(rsp, 0));
1822         __ movdbl(xmm0, Address(rsp, 0));
1823         __ lea(rsp, Address(rsp,  8));
1824       }
1825     }
1826   %}
1827 
1828 
1829   enc_class pre_call_resets %{
1830     // If method sets FPU control word restore it here
1831     debug_only(int off0 = cbuf.insts_size());
1832     if (ra_->C->in_24_bit_fp_mode()) {
1833       MacroAssembler _masm(&cbuf);
1834       __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
1835     }
1836     if (ra_->C->max_vector_size() > 16) {
1837       // Clear upper bits of YMM registers when current compiled code uses
1838       // wide vectors to avoid AVX <-> SSE transition penalty during call.
1839       MacroAssembler _masm(&cbuf);
1840       __ vzeroupper();
1841     }
1842     debug_only(int off1 = cbuf.insts_size());
1843     assert(off1 - off0 == pre_call_resets_size(), "correct size prediction");
1844   %}
1845 
1846   enc_class post_call_FPU %{
1847     // If method sets FPU control word do it here also
1848     if (Compile::current()->in_24_bit_fp_mode()) {
1849       MacroAssembler masm(&cbuf);
1850       masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
1851     }
1852   %}
1853 
1854   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
1855     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
1856     // who we intended to call.
1857     cbuf.set_insts_mark();
1858     $$$emit8$primary;
1859     if (!_method) {
1860       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1861                      runtime_call_Relocation::spec(), RELOC_IMM32 );
1862     } else if (_optimized_virtual) {
1863       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1864                      opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
1865     } else {
1866       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1867                      static_call_Relocation::spec(), RELOC_IMM32 );
1868     }
1869     if (_method) {  // Emit stub for static call.
1870       CompiledStaticCall::emit_to_interp_stub(cbuf);
1871     }
1872   %}
1873 
1874   enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
1875     MacroAssembler _masm(&cbuf);
1876     __ ic_call((address)$meth$$method);
1877   %}
1878 
1879   enc_class Java_Compiled_Call (method meth) %{    // JAVA COMPILED CALL
1880     int disp = in_bytes(Method::from_compiled_offset());
1881     assert( -128 <= disp && disp <= 127, "compiled_code_offset isn't small");
1882 
1883     // CALL *[EAX+in_bytes(Method::from_compiled_code_entry_point_offset())]
1884     cbuf.set_insts_mark();
1885     $$$emit8$primary;
1886     emit_rm(cbuf, 0x01, $secondary, EAX_enc );  // R/M byte
1887     emit_d8(cbuf, disp);             // Displacement
1888 
1889   %}
1890 
//   The following encoding is no longer used, but may be restored if the calling
//   convention changes significantly.
1893 //   Became: Xor_Reg(EBP), Java_To_Runtime( labl )
1894 //
1895 //   enc_class Java_Interpreter_Call (label labl) %{    // JAVA INTERPRETER CALL
1896 //     // int ic_reg     = Matcher::inline_cache_reg();
1897 //     // int ic_encode  = Matcher::_regEncode[ic_reg];
1898 //     // int imo_reg    = Matcher::interpreter_method_oop_reg();
1899 //     // int imo_encode = Matcher::_regEncode[imo_reg];
1900 //
1901 //     // // Interpreter expects method_oop in EBX, currently a callee-saved register,
1902 //     // // so we load it immediately before the call
1903 //     // emit_opcode(cbuf, 0x8B);                     // MOV    imo_reg,ic_reg  # method_oop
1904 //     // emit_rm(cbuf, 0x03, imo_encode, ic_encode ); // R/M byte
1905 //
1906 //     // xor rbp,ebp
1907 //     emit_opcode(cbuf, 0x33);
1908 //     emit_rm(cbuf, 0x3, EBP_enc, EBP_enc);
1909 //
1910 //     // CALL to interpreter.
1911 //     cbuf.set_insts_mark();
1912 //     $$$emit8$primary;
1913 //     emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf.insts_end()) - 4),
1914 //                 runtime_call_Relocation::spec(), RELOC_IMM32 );
1915 //   %}
1916 
1917   enc_class RegOpcImm (rRegI dst, immI8 shift) %{    // SHL, SAR, SHR
1918     $$$emit8$primary;
1919     emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
1920     $$$emit8$shift$$constant;
1921   %}
1922 
1923   enc_class LdImmI (rRegI dst, immI src) %{    // Load Immediate
1924     // Load immediate does not have a zero or sign extended version
1925     // for 8-bit immediates
1926     emit_opcode(cbuf, 0xB8 + $dst$$reg);
1927     $$$emit32$src$$constant;
1928   %}
1929 
1930   enc_class LdImmP (rRegI dst, immI src) %{    // Load Immediate
1931     // Load immediate does not have a zero or sign extended version
1932     // for 8-bit immediates
1933     emit_opcode(cbuf, $primary + $dst$$reg);
1934     $$$emit32$src$$constant;
1935   %}
1936 
1937   enc_class LdImmL_Lo( eRegL dst, immL src) %{    // Load Immediate
1938     // Load immediate does not have a zero or sign extended version
1939     // for 8-bit immediates
1940     int dst_enc = $dst$$reg;
1941     int src_con = $src$$constant & 0x0FFFFFFFFL;
1942     if (src_con == 0) {
1943       // xor dst, dst
1944       emit_opcode(cbuf, 0x33);
1945       emit_rm(cbuf, 0x3, dst_enc, dst_enc);
1946     } else {
1947       emit_opcode(cbuf, $primary + dst_enc);
1948       emit_d32(cbuf, src_con);
1949     }
1950   %}
1951 
1952   enc_class LdImmL_Hi( eRegL dst, immL src) %{    // Load Immediate
1953     // Load immediate does not have a zero or sign extended version
1954     // for 8-bit immediates
1955     int dst_enc = $dst$$reg + 2;
1956     int src_con = ((julong)($src$$constant)) >> 32;
1957     if (src_con == 0) {
1958       // xor dst, dst
1959       emit_opcode(cbuf, 0x33);
1960       emit_rm(cbuf, 0x3, dst_enc, dst_enc);
1961     } else {
1962       emit_opcode(cbuf, $primary + dst_enc);
1963       emit_d32(cbuf, src_con);
1964     }
1965   %}
1966 
1967 
1968   // Encode a reg-reg copy.  If it is useless, then empty encoding.
1969   enc_class enc_Copy( rRegI dst, rRegI src ) %{
1970     encode_Copy( cbuf, $dst$$reg, $src$$reg );
1971   %}
1972 
1973   enc_class enc_CopyL_Lo( rRegI dst, eRegL src ) %{
1974     encode_Copy( cbuf, $dst$$reg, $src$$reg );
1975   %}
1976 
1977   enc_class RegReg (rRegI dst, rRegI src) %{    // RegReg(Many)
1978     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
1979   %}
1980 
1981   enc_class RegReg_Lo(eRegL dst, eRegL src) %{    // RegReg(Many)
1982     $$$emit8$primary;
1983     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
1984   %}
1985 
1986   enc_class RegReg_Hi(eRegL dst, eRegL src) %{    // RegReg(Many)
1987     $$$emit8$secondary;
1988     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($src$$reg));
1989   %}
1990 
1991   enc_class RegReg_Lo2(eRegL dst, eRegL src) %{    // RegReg(Many)
1992     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
1993   %}
1994 
1995   enc_class RegReg_Hi2(eRegL dst, eRegL src) %{    // RegReg(Many)
1996     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($src$$reg));
1997   %}
1998 
1999   enc_class RegReg_HiLo( eRegL src, rRegI dst ) %{
2000     emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($src$$reg));
2001   %}
2002 
2003   enc_class Con32 (immI src) %{    // Con32(storeImmI)
2004     // Output immediate
2005     $$$emit32$src$$constant;
2006   %}
2007 
2008   enc_class Con32FPR_as_bits(immFPR src) %{        // storeF_imm
2009     // Output Float immediate bits
2010     jfloat jf = $src$$constant;
2011     int    jf_as_bits = jint_cast( jf );
2012     emit_d32(cbuf, jf_as_bits);
2013   %}
2014 
2015   enc_class Con32F_as_bits(immF src) %{      // storeX_imm
2016     // Output Float immediate bits
2017     jfloat jf = $src$$constant;
2018     int    jf_as_bits = jint_cast( jf );
2019     emit_d32(cbuf, jf_as_bits);
2020   %}
2021 
2022   enc_class Con16 (immI src) %{    // Con16(storeImmI)
2023     // Output immediate
2024     $$$emit16$src$$constant;
2025   %}
2026 
2027   enc_class Con_d32(immI src) %{
2028     emit_d32(cbuf,$src$$constant);
2029   %}
2030 
2031   enc_class conmemref (eRegP t1) %{    // Con32(storeImmI)
2032     // Output immediate memory reference
2033     emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
2034     emit_d32(cbuf, 0x00);
2035   %}
2036 
2037   enc_class lock_prefix( ) %{
2038     if( os::is_MP() )
2039       emit_opcode(cbuf,0xF0);         // [Lock]
2040   %}
2041 
2042   // Cmp-xchg long value.
  // Note: we need to swap rbx and rcx before and after the
  //       cmpxchg8 instruction because the instruction uses
  //       rcx as the high order word of the new value to store but
  //       our register encoding uses rbx.
2047   enc_class enc_cmpxchg8(eSIRegP mem_ptr) %{
2048 
2049     // XCHG  rbx,ecx
2050     emit_opcode(cbuf,0x87);
2051     emit_opcode(cbuf,0xD9);
2052     // [Lock]
2053     if( os::is_MP() )
2054       emit_opcode(cbuf,0xF0);
2055     // CMPXCHG8 [Eptr]
2056     emit_opcode(cbuf,0x0F);
2057     emit_opcode(cbuf,0xC7);
2058     emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
2059     // XCHG  rbx,ecx
2060     emit_opcode(cbuf,0x87);
2061     emit_opcode(cbuf,0xD9);
2062   %}
2063 
2064   enc_class enc_cmpxchg(eSIRegP mem_ptr) %{
2065     // [Lock]
2066     if( os::is_MP() )
2067       emit_opcode(cbuf,0xF0);
2068 
2069     // CMPXCHG [Eptr]
2070     emit_opcode(cbuf,0x0F);
2071     emit_opcode(cbuf,0xB1);
2072     emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
2073   %}
2074 
2075   enc_class enc_flags_ne_to_boolean( iRegI res ) %{
2076     int res_encoding = $res$$reg;
2077 
2078     // MOV  res,0
2079     emit_opcode( cbuf, 0xB8 + res_encoding);
2080     emit_d32( cbuf, 0 );
2081     // JNE,s  fail
2082     emit_opcode(cbuf,0x75);
2083     emit_d8(cbuf, 5 );
2084     // MOV  res,1
2085     emit_opcode( cbuf, 0xB8 + res_encoding);
2086     emit_d32( cbuf, 1 );
2087     // fail:
2088   %}
2089 
2090   enc_class set_instruction_start( ) %{
2091     cbuf.set_insts_mark();            // Mark start of opcode for reloc info in mem operand
2092   %}
2093 
2094   enc_class RegMem (rRegI ereg, memory mem) %{    // emit_reg_mem
2095     int reg_encoding = $ereg$$reg;
2096     int base  = $mem$$base;
2097     int index = $mem$$index;
2098     int scale = $mem$$scale;
2099     int displace = $mem$$disp;
2100     relocInfo::relocType disp_reloc = $mem->disp_reloc();
2101     encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
2102   %}
2103 
2104   enc_class RegMem_Hi(eRegL ereg, memory mem) %{    // emit_reg_mem
2105     int reg_encoding = HIGH_FROM_LOW($ereg$$reg);  // Hi register of pair, computed from lo
2106     int base  = $mem$$base;
2107     int index = $mem$$index;
2108     int scale = $mem$$scale;
2109     int displace = $mem$$disp + 4;      // Offset is 4 further in memory
2110     assert( $mem->disp_reloc() == relocInfo::none, "Cannot add 4 to oop" );
2111     encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, relocInfo::none);
2112   %}
2113 
2114   enc_class move_long_small_shift( eRegL dst, immI_1_31 cnt ) %{
2115     int r1, r2;
2116     if( $tertiary == 0xA4 ) { r1 = $dst$$reg;  r2 = HIGH_FROM_LOW($dst$$reg); }
2117     else                    { r2 = $dst$$reg;  r1 = HIGH_FROM_LOW($dst$$reg); }
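    // Double-precision shift first (0F A4 = SHLD, 0F AC = SHRD, selected by $tertiary):
    // it shifts one half of the pair while pulling in bits from the other half.
    // The plain shift ($primary/$secondary) then shifts the remaining half by the same count.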
2118     emit_opcode(cbuf,0x0F);
2119     emit_opcode(cbuf,$tertiary);
2120     emit_rm(cbuf, 0x3, r1, r2);
2121     emit_d8(cbuf,$cnt$$constant);
2122     emit_d8(cbuf,$primary);
2123     emit_rm(cbuf, 0x3, $secondary, r1);
2124     emit_d8(cbuf,$cnt$$constant);
2125   %}
2126 
2127   enc_class move_long_big_shift_sign( eRegL dst, immI_32_63 cnt ) %{
2128     emit_opcode( cbuf, 0x8B ); // Move
2129     emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg));
2130     if( $cnt$$constant > 32 ) { // Shift, if not by zero
2131       emit_d8(cbuf,$primary);
2132       emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
2133       emit_d8(cbuf,$cnt$$constant-32);
2134     }
2135     emit_d8(cbuf,$primary);
2136     emit_rm(cbuf, 0x3, $secondary, HIGH_FROM_LOW($dst$$reg));
2137     emit_d8(cbuf,31);
2138   %}
2139 
2140   enc_class move_long_big_shift_clr( eRegL dst, immI_32_63 cnt ) %{
2141     int r1, r2;
2142     if( $secondary == 0x5 ) { r1 = $dst$$reg;  r2 = HIGH_FROM_LOW($dst$$reg); }
2143     else                    { r2 = $dst$$reg;  r1 = HIGH_FROM_LOW($dst$$reg); }
2144 
2145     emit_opcode( cbuf, 0x8B ); // Move r1,r2
2146     emit_rm(cbuf, 0x3, r1, r2);
2147     if( $cnt$$constant > 32 ) { // Shift, if not by zero
2148       emit_opcode(cbuf,$primary);
2149       emit_rm(cbuf, 0x3, $secondary, r1);
2150       emit_d8(cbuf,$cnt$$constant-32);
2151     }
2152     emit_opcode(cbuf,0x33);  // XOR r2,r2
2153     emit_rm(cbuf, 0x3, r2, r2);
2154   %}
2155 
2156   // Clone of RegMem but accepts an extra parameter to access each
2157   // half of a double in memory; it never needs relocation info.
2158   enc_class Mov_MemD_half_to_Reg (immI opcode, memory mem, immI disp_for_half, rRegI rm_reg) %{
2159     emit_opcode(cbuf,$opcode$$constant);
2160     int reg_encoding = $rm_reg$$reg;
2161     int base     = $mem$$base;
2162     int index    = $mem$$index;
2163     int scale    = $mem$$scale;
2164     int displace = $mem$$disp + $disp_for_half$$constant;
2165     relocInfo::relocType disp_reloc = relocInfo::none;
2166     encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
2167   %}
2168 
2169   // !!!!! Special Custom Code used by MemMove, and stack access instructions !!!!!
2170   //
2171   // Clone of RegMem except the RM-byte's reg/opcode field is an ADLC-time constant
2172   // and it never needs relocation information.
2173   // Frequently used to move data between FPU's Stack Top and memory.
2174   enc_class RMopc_Mem_no_oop (immI rm_opcode, memory mem) %{
2175     int rm_byte_opcode = $rm_opcode$$constant;
2176     int base     = $mem$$base;
2177     int index    = $mem$$index;
2178     int scale    = $mem$$scale;
2179     int displace = $mem$$disp;
2180     assert( $mem->disp_reloc() == relocInfo::none, "No oops here because no reloc info allowed" );
2181     encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, relocInfo::none);
2182   %}
2183 
2184   enc_class RMopc_Mem (immI rm_opcode, memory mem) %{
2185     int rm_byte_opcode = $rm_opcode$$constant;
2186     int base     = $mem$$base;
2187     int index    = $mem$$index;
2188     int scale    = $mem$$scale;
2189     int displace = $mem$$disp;
2190     relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
2191     encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
2192   %}
2193 
2194   enc_class RegLea (rRegI dst, rRegI src0, immI src1 ) %{    // emit_reg_lea
2195     int reg_encoding = $dst$$reg;
2196     int base         = $src0$$reg;      // 0xFFFFFFFF indicates no base
2197     int index        = 0x04;            // 0x04 indicates no index
2198     int scale        = 0x00;            // 0x00 indicates no scale
2199     int displace     = $src1$$constant; // 0x00 indicates no displacement
2200     relocInfo::relocType disp_reloc = relocInfo::none;
2201     encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
2202   %}
2203 
2204   enc_class min_enc (rRegI dst, rRegI src) %{    // MIN
2205     // Compare dst,src
2206     emit_opcode(cbuf,0x3B);
2207     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
2208     // jmp dst < src around move
2209     emit_opcode(cbuf,0x7C);
2210     emit_d8(cbuf,2);
2211     // move dst,src
2212     emit_opcode(cbuf,0x8B);
2213     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
2214   %}
2215 
2216   enc_class max_enc (rRegI dst, rRegI src) %{    // MAX
2217     // Compare dst,src
2218     emit_opcode(cbuf,0x3B);
2219     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
2220     // jmp dst > src around move
2221     emit_opcode(cbuf,0x7F);
2222     emit_d8(cbuf,2);
2223     // move dst,src
2224     emit_opcode(cbuf,0x8B);
2225     emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
2226   %}
2227 
2228   enc_class enc_FPR_store(memory mem, regDPR src) %{
2229     // If src is FPR1, we can just FST to store it.
2230     // Else we need to FLD it to FPR1, then FSTP to store/pop it.
2231     int reg_encoding = 0x2; // Just store
2232     int base  = $mem$$base;
2233     int index = $mem$$index;
2234     int scale = $mem$$scale;
2235     int displace = $mem$$disp;
2236     relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
2237     if( $src$$reg != FPR1L_enc ) {
2238       reg_encoding = 0x3;  // Store & pop
2239       emit_opcode( cbuf, 0xD9 ); // FLD (i.e., push it)
2240       emit_d8( cbuf, 0xC0-1+$src$$reg );
2241     }
2242     cbuf.set_insts_mark();       // Mark start of opcode for reloc info in mem operand
2243     emit_opcode(cbuf,$primary);
2244     encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
2245   %}
2246 
2247   enc_class neg_reg(rRegI dst) %{
2248     // NEG $dst
2249     emit_opcode(cbuf,0xF7);
2250     emit_rm(cbuf, 0x3, 0x03, $dst$$reg );
2251   %}
2252 
2253   enc_class setLT_reg(eCXRegI dst) %{
2254     // SETLT $dst
2255     emit_opcode(cbuf,0x0F);
2256     emit_opcode(cbuf,0x9C);
2257     emit_rm( cbuf, 0x3, 0x4, $dst$$reg );
2258   %}
2259 
2260   enc_class enc_cmpLTP(ncxRegI p, ncxRegI q, ncxRegI y, eCXRegI tmp) %{    // cadd_cmpLT
2261     int tmpReg = $tmp$$reg;
2262 
2263     // SUB $p,$q
2264     emit_opcode(cbuf,0x2B);
2265     emit_rm(cbuf, 0x3, $p$$reg, $q$$reg);
2266     // SBB $tmp,$tmp
2267     emit_opcode(cbuf,0x1B);
2268     emit_rm(cbuf, 0x3, tmpReg, tmpReg);
2269     // AND $tmp,$y
2270     emit_opcode(cbuf,0x23);
2271     emit_rm(cbuf, 0x3, tmpReg, $y$$reg);
2272     // ADD $p,$tmp
2273     emit_opcode(cbuf,0x03);
2274     emit_rm(cbuf, 0x3, $p$$reg, tmpReg);
2275   %}
2276 
2277   enc_class shift_left_long( eRegL dst, eCXRegI shift ) %{
2278     // TEST shift,32
2279     emit_opcode(cbuf,0xF7);
2280     emit_rm(cbuf, 0x3, 0, ECX_enc);
2281     emit_d32(cbuf,0x20);
2282     // JEQ,s small
2283     emit_opcode(cbuf, 0x74);
2284     emit_d8(cbuf, 0x04);
2285     // MOV    $dst.hi,$dst.lo
2286     emit_opcode( cbuf, 0x8B );
2287     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg );
2288     // CLR    $dst.lo
2289     emit_opcode(cbuf, 0x33);
2290     emit_rm(cbuf, 0x3, $dst$$reg, $dst$$reg);
2291 // small:
2292     // SHLD   $dst.hi,$dst.lo,$shift
2293     emit_opcode(cbuf,0x0F);
2294     emit_opcode(cbuf,0xA5);
2295     emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg));
    // SHL    $dst.lo,$shift
2297     emit_opcode(cbuf,0xD3);
2298     emit_rm(cbuf, 0x3, 0x4, $dst$$reg );
2299   %}
2300 
2301   enc_class shift_right_long( eRegL dst, eCXRegI shift ) %{
2302     // TEST shift,32
2303     emit_opcode(cbuf,0xF7);
2304     emit_rm(cbuf, 0x3, 0, ECX_enc);
2305     emit_d32(cbuf,0x20);
2306     // JEQ,s small
2307     emit_opcode(cbuf, 0x74);
2308     emit_d8(cbuf, 0x04);
2309     // MOV    $dst.lo,$dst.hi
2310     emit_opcode( cbuf, 0x8B );
2311     emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg) );
2312     // CLR    $dst.hi
2313     emit_opcode(cbuf, 0x33);
2314     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($dst$$reg));
2315 // small:
2316     // SHRD   $dst.lo,$dst.hi,$shift
2317     emit_opcode(cbuf,0x0F);
2318     emit_opcode(cbuf,0xAD);
2319     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg);
    // SHR    $dst.hi,$shift
2321     emit_opcode(cbuf,0xD3);
2322     emit_rm(cbuf, 0x3, 0x5, HIGH_FROM_LOW($dst$$reg) );
2323   %}
2324 
2325   enc_class shift_right_arith_long( eRegL dst, eCXRegI shift ) %{
2326     // TEST shift,32
2327     emit_opcode(cbuf,0xF7);
2328     emit_rm(cbuf, 0x3, 0, ECX_enc);
2329     emit_d32(cbuf,0x20);
2330     // JEQ,s small
2331     emit_opcode(cbuf, 0x74);
2332     emit_d8(cbuf, 0x05);
2333     // MOV    $dst.lo,$dst.hi
2334     emit_opcode( cbuf, 0x8B );
2335     emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg) );
2336     // SAR    $dst.hi,31
2337     emit_opcode(cbuf, 0xC1);
2338     emit_rm(cbuf, 0x3, 7, HIGH_FROM_LOW($dst$$reg) );
2339     emit_d8(cbuf, 0x1F );
2340 // small:
2341     // SHRD   $dst.lo,$dst.hi,$shift
2342     emit_opcode(cbuf,0x0F);
2343     emit_opcode(cbuf,0xAD);
2344     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg);
    // SAR    $dst.hi,$shift
2346     emit_opcode(cbuf,0xD3);
2347     emit_rm(cbuf, 0x3, 0x7, HIGH_FROM_LOW($dst$$reg) );
2348   %}
2349 
2350 
2351   // ----------------- Encodings for floating point unit -----------------
2352   // May leave result in FPU-TOS or FPU reg depending on opcodes
2353   enc_class OpcReg_FPR(regFPR src) %{    // FMUL, FDIV
2354     $$$emit8$primary;
2355     emit_rm(cbuf, 0x3, $secondary, $src$$reg );
2356   %}
2357 
2358   // Pop argument in FPR0 with FSTP ST(0)
2359   enc_class PopFPU() %{
2360     emit_opcode( cbuf, 0xDD );
2361     emit_d8( cbuf, 0xD8 );
2362   %}
2363 
2364   // !!!!! equivalent to Pop_Reg_F
2365   enc_class Pop_Reg_DPR( regDPR dst ) %{
2366     emit_opcode( cbuf, 0xDD );           // FSTP   ST(i)
2367     emit_d8( cbuf, 0xD8+$dst$$reg );
2368   %}
2369 
2370   enc_class Push_Reg_DPR( regDPR dst ) %{
2371     emit_opcode( cbuf, 0xD9 );
2372     emit_d8( cbuf, 0xC0-1+$dst$$reg );   // FLD ST(i-1)
2373   %}
2374 
2375   enc_class strictfp_bias1( regDPR dst ) %{
2376     emit_opcode( cbuf, 0xDB );           // FLD m80real
2377     emit_opcode( cbuf, 0x2D );
2378     emit_d32( cbuf, (int)StubRoutines::addr_fpu_subnormal_bias1() );
2379     emit_opcode( cbuf, 0xDE );           // FMULP ST(dst), ST0
2380     emit_opcode( cbuf, 0xC8+$dst$$reg );
2381   %}
2382 
2383   enc_class strictfp_bias2( regDPR dst ) %{
2384     emit_opcode( cbuf, 0xDB );           // FLD m80real
2385     emit_opcode( cbuf, 0x2D );
2386     emit_d32( cbuf, (int)StubRoutines::addr_fpu_subnormal_bias2() );
2387     emit_opcode( cbuf, 0xDE );           // FMULP ST(dst), ST0
2388     emit_opcode( cbuf, 0xC8+$dst$$reg );
2389   %}
2390 
2391   // Special case for moving an integer register to a stack slot.
2392   enc_class OpcPRegSS( stackSlotI dst, rRegI src ) %{ // RegSS
2393     store_to_stackslot( cbuf, $primary, $src$$reg, $dst$$disp );
2394   %}
2395 
2396   // Special case for moving a register to a stack slot.
2397   enc_class RegSS( stackSlotI dst, rRegI src ) %{ // RegSS
2398     // Opcode already emitted
2399     emit_rm( cbuf, 0x02, $src$$reg, ESP_enc );   // R/M byte
2400     emit_rm( cbuf, 0x00, ESP_enc, ESP_enc);          // SIB byte
2401     emit_d32(cbuf, $dst$$disp);   // Displacement
2402   %}
2403 
2404   // Push the integer in stackSlot 'src' onto FP-stack
2405   enc_class Push_Mem_I( memory src ) %{    // FILD   [ESP+src]
2406     store_to_stackslot( cbuf, $primary, $secondary, $src$$disp );
2407   %}
2408 
2409   // Push FPU's TOS float to a stack-slot, and pop FPU-stack
2410   enc_class Pop_Mem_FPR( stackSlotF dst ) %{ // FSTP_S [ESP+dst]
2411     store_to_stackslot( cbuf, 0xD9, 0x03, $dst$$disp );
2412   %}
2413 
2414   // Same as Pop_Mem_F except for opcode
2415   // Push FPU's TOS double to a stack-slot, and pop FPU-stack
2416   enc_class Pop_Mem_DPR( stackSlotD dst ) %{ // FSTP_D [ESP+dst]
2417     store_to_stackslot( cbuf, 0xDD, 0x03, $dst$$disp );
2418   %}
2419 
2420   enc_class Pop_Reg_FPR( regFPR dst ) %{
2421     emit_opcode( cbuf, 0xDD );           // FSTP   ST(i)
2422     emit_d8( cbuf, 0xD8+$dst$$reg );
2423   %}
2424 
2425   enc_class Push_Reg_FPR( regFPR dst ) %{
2426     emit_opcode( cbuf, 0xD9 );           // FLD    ST(i-1)
2427     emit_d8( cbuf, 0xC0-1+$dst$$reg );
2428   %}
2429 
2430   // Push FPU's float to a stack-slot, and pop FPU-stack
2431   enc_class Pop_Mem_Reg_FPR( stackSlotF dst, regFPR src ) %{
2432     int pop = 0x02;
2433     if ($src$$reg != FPR1L_enc) {
2434       emit_opcode( cbuf, 0xD9 );         // FLD    ST(i-1)
2435       emit_d8( cbuf, 0xC0-1+$src$$reg );
2436       pop = 0x03;
2437     }
2438     store_to_stackslot( cbuf, 0xD9, pop, $dst$$disp ); // FST<P>_S  [ESP+dst]
2439   %}
2440 
2441   // Push FPU's double to a stack-slot, and pop FPU-stack
2442   enc_class Pop_Mem_Reg_DPR( stackSlotD dst, regDPR src ) %{
2443     int pop = 0x02;
2444     if ($src$$reg != FPR1L_enc) {
2445       emit_opcode( cbuf, 0xD9 );         // FLD    ST(i-1)
2446       emit_d8( cbuf, 0xC0-1+$src$$reg );
2447       pop = 0x03;
2448     }
2449     store_to_stackslot( cbuf, 0xDD, pop, $dst$$disp ); // FST<P>_D  [ESP+dst]
2450   %}
2451 
2452   // Push FPU's double to a FPU-stack-slot, and pop FPU-stack
2453   enc_class Pop_Reg_Reg_DPR( regDPR dst, regFPR src ) %{
2454     int pop = 0xD0 - 1; // -1 since we skip FLD
2455     if ($src$$reg != FPR1L_enc) {
2456       emit_opcode( cbuf, 0xD9 );         // FLD    ST(src-1)
2457       emit_d8( cbuf, 0xC0-1+$src$$reg );
2458       pop = 0xD8;
2459     }
2460     emit_opcode( cbuf, 0xDD );
2461     emit_d8( cbuf, pop+$dst$$reg );      // FST<P> ST(i)
2462   %}
2463 
2464 
2465   enc_class Push_Reg_Mod_DPR( regDPR dst, regDPR src) %{
2466     // load dst in FPR0
2467     emit_opcode( cbuf, 0xD9 );
2468     emit_d8( cbuf, 0xC0-1+$dst$$reg );
2469     if ($src$$reg != FPR1L_enc) {
2470       // fincstp
2471       emit_opcode (cbuf, 0xD9);
2472       emit_opcode (cbuf, 0xF7);
2473       // swap src with FPR1:
2474       // FXCH FPR1 with src
2475       emit_opcode(cbuf, 0xD9);
2476       emit_d8(cbuf, 0xC8-1+$src$$reg );
2477       // fdecstp
2478       emit_opcode (cbuf, 0xD9);
2479       emit_opcode (cbuf, 0xF6);
2480     }
2481   %}
2482 
2483   enc_class Push_ModD_encoding(regD src0, regD src1) %{
2484     MacroAssembler _masm(&cbuf);
2485     __ subptr(rsp, 8);
2486     __ movdbl(Address(rsp, 0), $src1$$XMMRegister);
2487     __ fld_d(Address(rsp, 0));
2488     __ movdbl(Address(rsp, 0), $src0$$XMMRegister);
2489     __ fld_d(Address(rsp, 0));
2490   %}
2491 
2492   enc_class Push_ModF_encoding(regF src0, regF src1) %{
2493     MacroAssembler _masm(&cbuf);
2494     __ subptr(rsp, 4);
2495     __ movflt(Address(rsp, 0), $src1$$XMMRegister);
2496     __ fld_s(Address(rsp, 0));
2497     __ movflt(Address(rsp, 0), $src0$$XMMRegister);
2498     __ fld_s(Address(rsp, 0));
2499   %}
2500 
2501   enc_class Push_ResultD(regD dst) %{
2502     MacroAssembler _masm(&cbuf);
2503     __ fstp_d(Address(rsp, 0));
2504     __ movdbl($dst$$XMMRegister, Address(rsp, 0));
2505     __ addptr(rsp, 8);
2506   %}
2507 
2508   enc_class Push_ResultF(regF dst, immI d8) %{
2509     MacroAssembler _masm(&cbuf);
2510     __ fstp_s(Address(rsp, 0));
2511     __ movflt($dst$$XMMRegister, Address(rsp, 0));
2512     __ addptr(rsp, $d8$$constant);
2513   %}
2514 
2515   enc_class Push_SrcD(regD src) %{
2516     MacroAssembler _masm(&cbuf);
2517     __ subptr(rsp, 8);
2518     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
2519     __ fld_d(Address(rsp, 0));
2520   %}
2521 
2522   enc_class push_stack_temp_qword() %{
2523     MacroAssembler _masm(&cbuf);
2524     __ subptr(rsp, 8);
2525   %}
2526 
2527   enc_class pop_stack_temp_qword() %{
2528     MacroAssembler _masm(&cbuf);
2529     __ addptr(rsp, 8);
2530   %}
2531 
2532   enc_class push_xmm_to_fpr1(regD src) %{
2533     MacroAssembler _masm(&cbuf);
2534     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
2535     __ fld_d(Address(rsp, 0));
2536   %}
2537 
2538   enc_class Push_Result_Mod_DPR( regDPR src) %{
2539     if ($src$$reg != FPR1L_enc) {
2540       // fincstp
2541       emit_opcode (cbuf, 0xD9);
2542       emit_opcode (cbuf, 0xF7);
2543       // FXCH FPR1 with src
2544       emit_opcode(cbuf, 0xD9);
2545       emit_d8(cbuf, 0xC8-1+$src$$reg );
2546       // fdecstp
2547       emit_opcode (cbuf, 0xD9);
2548       emit_opcode (cbuf, 0xF6);
2549     }
2550     // // following asm replaced with Pop_Reg_F or Pop_Mem_F
2551     // // FSTP   FPR$dst$$reg
2552     // emit_opcode( cbuf, 0xDD );
2553     // emit_d8( cbuf, 0xD8+$dst$$reg );
2554   %}
2555 
2556   enc_class fnstsw_sahf_skip_parity() %{
2557     // fnstsw ax
2558     emit_opcode( cbuf, 0xDF );
2559     emit_opcode( cbuf, 0xE0 );
2560     // sahf
2561     emit_opcode( cbuf, 0x9E );
2562     // jnp  ::skip
2563     emit_opcode( cbuf, 0x7B );
2564     emit_opcode( cbuf, 0x05 );
2565   %}
2566 
2567   enc_class emitModDPR() %{
2568     // fprem must be iterative
2569     // :: loop
2570     // fprem
2571     emit_opcode( cbuf, 0xD9 );
2572     emit_opcode( cbuf, 0xF8 );
2573     // wait
2574     emit_opcode( cbuf, 0x9b );
2575     // fnstsw ax
2576     emit_opcode( cbuf, 0xDF );
2577     emit_opcode( cbuf, 0xE0 );
2578     // sahf
2579     emit_opcode( cbuf, 0x9E );
2580     // jp  ::loop
2581     emit_opcode( cbuf, 0x0F );
2582     emit_opcode( cbuf, 0x8A );
2583     emit_opcode( cbuf, 0xF4 );
2584     emit_opcode( cbuf, 0xFF );
2585     emit_opcode( cbuf, 0xFF );
2586     emit_opcode( cbuf, 0xFF );
2587   %}
2588 
2589   enc_class fpu_flags() %{
2590     // fnstsw_ax
2591     emit_opcode( cbuf, 0xDF);
2592     emit_opcode( cbuf, 0xE0);
2593     // test ax,0x0400
2594     emit_opcode( cbuf, 0x66 );   // operand-size prefix for 16-bit immediate
2595     emit_opcode( cbuf, 0xA9 );
2596     emit_d16   ( cbuf, 0x0400 );
2597     // // // This sequence works, but stalls for 12-16 cycles on PPro
2598     // // test rax,0x0400
2599     // emit_opcode( cbuf, 0xA9 );
2600     // emit_d32   ( cbuf, 0x00000400 );
2601     //
2602     // jz exit (no unordered comparison)
2603     emit_opcode( cbuf, 0x74 );
2604     emit_d8    ( cbuf, 0x02 );
2605     // mov ah,1 - treat as LT case (set carry flag)
2606     emit_opcode( cbuf, 0xB4 );
2607     emit_d8    ( cbuf, 0x01 );
2608     // sahf
2609     emit_opcode( cbuf, 0x9E);
2610   %}
2611 
2612   enc_class cmpF_P6_fixup() %{
2613     // Fixup the integer flags in case comparison involved a NaN
2614     //
2615     // JNP exit (no unordered comparison, P-flag is set by NaN)
2616     emit_opcode( cbuf, 0x7B );
2617     emit_d8    ( cbuf, 0x03 );
2618     // MOV AH,1 - treat as LT case (set carry flag)
2619     emit_opcode( cbuf, 0xB4 );
2620     emit_d8    ( cbuf, 0x01 );
2621     // SAHF
2622     emit_opcode( cbuf, 0x9E);
2623     // NOP     // target for branch to avoid branch to branch
2624     emit_opcode( cbuf, 0x90);
2625   %}
2626 
2627 //     fnstsw_ax();
2628 //     sahf();
2629 //     movl(dst, nan_result);
2630 //     jcc(Assembler::parity, exit);
2631 //     movl(dst, less_result);
2632 //     jcc(Assembler::below, exit);
2633 //     movl(dst, equal_result);
2634 //     jcc(Assembler::equal, exit);
2635 //     movl(dst, greater_result);
2636 
2637 // less_result     =  1;
2638 // greater_result  = -1;
2639 // equal_result    = 0;
2640 // nan_result      = -1;
2641 
2642   enc_class CmpF_Result(rRegI dst) %{
2643     // fnstsw_ax();
2644     emit_opcode( cbuf, 0xDF);
2645     emit_opcode( cbuf, 0xE0);
2646     // sahf
2647     emit_opcode( cbuf, 0x9E);
2648     // movl(dst, nan_result);
2649     emit_opcode( cbuf, 0xB8 + $dst$$reg);
2650     emit_d32( cbuf, -1 );
2651     // jcc(Assembler::parity, exit);
2652     emit_opcode( cbuf, 0x7A );
2653     emit_d8    ( cbuf, 0x13 );
2654     // movl(dst, less_result);
2655     emit_opcode( cbuf, 0xB8 + $dst$$reg);
2656     emit_d32( cbuf, -1 );
2657     // jcc(Assembler::below, exit);
2658     emit_opcode( cbuf, 0x72 );
2659     emit_d8    ( cbuf, 0x0C );
2660     // movl(dst, equal_result);
2661     emit_opcode( cbuf, 0xB8 + $dst$$reg);
2662     emit_d32( cbuf, 0 );
2663     // jcc(Assembler::equal, exit);
2664     emit_opcode( cbuf, 0x74 );
2665     emit_d8    ( cbuf, 0x05 );
2666     // movl(dst, greater_result);
2667     emit_opcode( cbuf, 0xB8 + $dst$$reg);
2668     emit_d32( cbuf, 1 );
2669   %}
2670 
2671 
2672   // Compare the longs and set flags
2673   // BROKEN!  Do Not use as-is
2674   enc_class cmpl_test( eRegL src1, eRegL src2 ) %{
2675     // CMP    $src1.hi,$src2.hi
2676     emit_opcode( cbuf, 0x3B );
2677     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($src1$$reg), HIGH_FROM_LOW($src2$$reg) );
2678     // JNE,s  done
2679     emit_opcode(cbuf,0x75);
2680     emit_d8(cbuf, 2 );
2681     // CMP    $src1.lo,$src2.lo
2682     emit_opcode( cbuf, 0x3B );
2683     emit_rm(cbuf, 0x3, $src1$$reg, $src2$$reg );
2684 // done:
2685   %}
2686 
2687   enc_class convert_int_long( regL dst, rRegI src ) %{
2688     // mov $dst.lo,$src
2689     int dst_encoding = $dst$$reg;
2690     int src_encoding = $src$$reg;
2691     encode_Copy( cbuf, dst_encoding  , src_encoding );
2692     // mov $dst.hi,$src
2693     encode_Copy( cbuf, HIGH_FROM_LOW(dst_encoding), src_encoding );
2694     // sar $dst.hi,31
2695     emit_opcode( cbuf, 0xC1 );
2696     emit_rm(cbuf, 0x3, 7, HIGH_FROM_LOW(dst_encoding) );
2697     emit_d8(cbuf, 0x1F );
2698   %}
2699 
2700   enc_class convert_long_double( eRegL src ) %{
2701     // push $src.hi
2702     emit_opcode(cbuf, 0x50+HIGH_FROM_LOW($src$$reg));
2703     // push $src.lo
2704     emit_opcode(cbuf, 0x50+$src$$reg  );
2705     // fild 64-bits at [SP]
2706     emit_opcode(cbuf,0xdf);
2707     emit_d8(cbuf, 0x6C);
2708     emit_d8(cbuf, 0x24);
2709     emit_d8(cbuf, 0x00);
2710     // pop stack
2711     emit_opcode(cbuf, 0x83); // add  SP, #8
2712     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
2713     emit_d8(cbuf, 0x8);
2714   %}
2715 
2716   enc_class multiply_con_and_shift_high( eDXRegI dst, nadxRegI src1, eADXRegL_low_only src2, immI_32_63 cnt, eFlagsReg cr ) %{
2717     // IMUL   EDX:EAX,$src1
2718     emit_opcode( cbuf, 0xF7 );
2719     emit_rm( cbuf, 0x3, 0x5, $src1$$reg );
2720     // SAR    EDX,$cnt-32
2721     int shift_count = ((int)$cnt$$constant) - 32;
2722     if (shift_count > 0) {
2723       emit_opcode(cbuf, 0xC1);
2724       emit_rm(cbuf, 0x3, 7, $dst$$reg );
2725       emit_d8(cbuf, shift_count);
2726     }
2727   %}
2728 
2729   // this version doesn't have add sp, 8
2730   enc_class convert_long_double2( eRegL src ) %{
2731     // push $src.hi
2732     emit_opcode(cbuf, 0x50+HIGH_FROM_LOW($src$$reg));
2733     // push $src.lo
2734     emit_opcode(cbuf, 0x50+$src$$reg  );
2735     // fild 64-bits at [SP]
2736     emit_opcode(cbuf,0xdf);
2737     emit_d8(cbuf, 0x6C);
2738     emit_d8(cbuf, 0x24);
2739     emit_d8(cbuf, 0x00);
2740   %}
2741 
2742   enc_class long_int_multiply( eADXRegL dst, nadxRegI src) %{
2743     // Basic idea: long = (long)int * (long)int
2744     // IMUL EDX:EAX, src
2745     emit_opcode( cbuf, 0xF7 );
2746     emit_rm( cbuf, 0x3, 0x5, $src$$reg);
2747   %}
2748 
2749   enc_class long_uint_multiply( eADXRegL dst, nadxRegI src) %{
2750     // Basic Idea:  long = (int & 0xffffffffL) * (int & 0xffffffffL)
2751     // MUL EDX:EAX, src
2752     emit_opcode( cbuf, 0xF7 );
2753     emit_rm( cbuf, 0x3, 0x4, $src$$reg);
2754   %}
2755 
2756   enc_class long_multiply( eADXRegL dst, eRegL src, rRegI tmp ) %{
2757     // Basic idea: lo(result) = lo(x_lo * y_lo)
2758     //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
2759     // MOV    $tmp,$src.lo
2760     encode_Copy( cbuf, $tmp$$reg, $src$$reg );
2761     // IMUL   $tmp,EDX
2762     emit_opcode( cbuf, 0x0F );
2763     emit_opcode( cbuf, 0xAF );
2764     emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($dst$$reg) );
2765     // MOV    EDX,$src.hi
2766     encode_Copy( cbuf, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($src$$reg) );
2767     // IMUL   EDX,EAX
2768     emit_opcode( cbuf, 0x0F );
2769     emit_opcode( cbuf, 0xAF );
2770     emit_rm( cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg );
2771     // ADD    $tmp,EDX
2772     emit_opcode( cbuf, 0x03 );
2773     emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($dst$$reg) );
2774     // MUL   EDX:EAX,$src.lo
2775     emit_opcode( cbuf, 0xF7 );
2776     emit_rm( cbuf, 0x3, 0x4, $src$$reg );
2777     // ADD    EDX,$tmp
2778     emit_opcode( cbuf, 0x03 );
2779     emit_rm( cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $tmp$$reg );
2780   %}
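
       // Illustrative sketch (not emitted code) of the 64x64->64 multiply above,
       // written out in C on the four 32-bit halves (x taken from $src, y from the
       // value initially in $dst = EDX:EAX):
       //
       //   uint32_t x_lo, x_hi, y_lo, y_hi;
       //   uint64_t p  = (uint64_t)x_lo * y_lo;          // MUL: full 32x32->64 product
       //   uint32_t lo = (uint32_t)p;                    // lo(result) = lo(x_lo*y_lo)
       //   uint32_t hi = (uint32_t)(p >> 32)             // hi(x_lo*y_lo)
       //               + x_lo * y_hi                     // IMUL: low 32 bits only
       //               + x_hi * y_lo;                    // IMUL: low 32 bits only
       //   // (hi:lo) == low 64 bits of x*y; the terms that land above bit 63 simply vanish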
2781 
2782   enc_class long_multiply_con( eADXRegL dst, immL_127 src, rRegI tmp ) %{
2783     // Basic idea: lo(result) = lo(src * y_lo)
2784     //             hi(result) = hi(src * y_lo) + lo(src * y_hi)
2785     // IMUL   $tmp,EDX,$src
2786     emit_opcode( cbuf, 0x6B );
2787     emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($dst$$reg) );
2788     emit_d8( cbuf, (int)$src$$constant );
2789     // MOV    EDX,$src
2790     emit_opcode(cbuf, 0xB8 + EDX_enc);
2791     emit_d32( cbuf, (int)$src$$constant );
2792     // MUL   EDX:EAX,EDX
2793     emit_opcode( cbuf, 0xF7 );
2794     emit_rm( cbuf, 0x3, 0x4, EDX_enc );
2795     // ADD    EDX,$tmp
2796     emit_opcode( cbuf, 0x03 );
2797     emit_rm( cbuf, 0x3, EDX_enc, $tmp$$reg );
2798   %}
2799 
2800   enc_class long_div( eRegL src1, eRegL src2 ) %{
2801     // PUSH src1.hi
2802     emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src1$$reg) );
2803     // PUSH src1.lo
2804     emit_opcode(cbuf,               0x50+$src1$$reg  );
2805     // PUSH src2.hi
2806     emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src2$$reg) );
2807     // PUSH src2.lo
2808     emit_opcode(cbuf,               0x50+$src2$$reg  );
2809     // CALL directly to the runtime
2810     cbuf.set_insts_mark();
2811     emit_opcode(cbuf,0xE8);       // Call into runtime
2812     emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
2813     // Restore stack
2814     emit_opcode(cbuf, 0x83); // add  SP, #framesize
2815     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
2816     emit_d8(cbuf, 4*4);
2817   %}
2818 
2819   enc_class long_mod( eRegL src1, eRegL src2 ) %{
2820     // PUSH src1.hi
2821     emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src1$$reg) );
2822     // PUSH src1.lo
2823     emit_opcode(cbuf,               0x50+$src1$$reg  );
2824     // PUSH src2.hi
2825     emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src2$$reg) );
2826     // PUSH src2.lo
2827     emit_opcode(cbuf,               0x50+$src2$$reg  );
2828     // CALL directly to the runtime
2829     cbuf.set_insts_mark();
2830     emit_opcode(cbuf,0xE8);       // Call into runtime
2831     emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
2832     // Restore stack
2833     emit_opcode(cbuf, 0x83); // add  SP, #framesize
2834     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
2835     emit_d8(cbuf, 4*4);
2836   %}
2837 
2838   enc_class long_cmp_flags0( eRegL src, rRegI tmp ) %{
2839     // MOV   $tmp,$src.lo
2840     emit_opcode(cbuf, 0x8B);
2841     emit_rm(cbuf, 0x3, $tmp$$reg, $src$$reg);
2842     // OR    $tmp,$src.hi
2843     emit_opcode(cbuf, 0x0B);
2844     emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src$$reg));
2845   %}
2846 
2847   enc_class long_cmp_flags1( eRegL src1, eRegL src2 ) %{
2848     // CMP    $src1.lo,$src2.lo
2849     emit_opcode( cbuf, 0x3B );
2850     emit_rm(cbuf, 0x3, $src1$$reg, $src2$$reg );
2851     // JNE,s  skip
2852     emit_cc(cbuf, 0x70, 0x5);
2853     emit_d8(cbuf,2);
2854     // CMP    $src1.hi,$src2.hi
2855     emit_opcode( cbuf, 0x3B );
2856     emit_rm(cbuf, 0x3, HIGH_FROM_LOW($src1$$reg), HIGH_FROM_LOW($src2$$reg) );
2857   %}
2858 
2859   enc_class long_cmp_flags2( eRegL src1, eRegL src2, rRegI tmp ) %{
2860     // CMP    $src1.lo,$src2.lo     ; long compare: set flags for the low words
2861     emit_opcode( cbuf, 0x3B );
2862     emit_rm(cbuf, 0x3, $src1$$reg, $src2$$reg );
2863     // MOV    $tmp,$src1.hi
2864     emit_opcode( cbuf, 0x8B );
2865     emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src1$$reg) );
2866     // SBB    $tmp,$src2.hi         ; compute flags for the long compare
2867     emit_opcode( cbuf, 0x1B );
2868     emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src2$$reg) );
2869   %}
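
       // Illustrative sketch (not emitted code): the CMP ... SBB sequence completes the
       // 64-bit subtraction src1 - src2 and leaves the flags of the high-word
       // subtract-with-borrow, which is exactly what a signed LT/GE test needs
       // ($tmp is clobbered in the process):
       //
       //   int64_t src1, src2;
       //   bool lt = (src1 < src2);   // signed: read as SF != OF after the SBB
       //   bool ge = !lt;             // SF == OF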
2870 
2871   enc_class long_cmp_flags3( eRegL src, rRegI tmp ) %{
2872     // XOR    $tmp,$tmp
2873     emit_opcode(cbuf,0x33);  // XOR
2874     emit_rm(cbuf,0x3, $tmp$$reg, $tmp$$reg);
2875     // CMP    $tmp,$src.lo
2876     emit_opcode( cbuf, 0x3B );
2877     emit_rm(cbuf, 0x3, $tmp$$reg, $src$$reg );
2878     // SBB    $tmp,$src.hi
2879     emit_opcode( cbuf, 0x1B );
2880     emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src$$reg) );
2881   %}
2882 
2883   // Negate a long held in a register pair.  Sniff, sniff... smells like Gnu Superoptimizer
2884   enc_class neg_long( eRegL dst ) %{
2885     emit_opcode(cbuf,0xF7);    // NEG hi
2886     emit_rm    (cbuf,0x3, 0x3, HIGH_FROM_LOW($dst$$reg));
2887     emit_opcode(cbuf,0xF7);    // NEG lo
2888     emit_rm    (cbuf,0x3, 0x3,               $dst$$reg );
2889     emit_opcode(cbuf,0x83);    // SBB hi,0
2890     emit_rm    (cbuf,0x3, 0x3, HIGH_FROM_LOW($dst$$reg));
2891     emit_d8    (cbuf,0 );
2892   %}
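
       // Illustrative sketch (not emitted code) of the register-pair negation above:
       //
       //   uint32_t lo, hi;                 // the long is (hi:lo)
       //   uint32_t neg_hi = 0u - hi;       // NEG hi
       //   uint32_t neg_lo = 0u - lo;       // NEG lo  (leaves CF = (lo != 0))
       //   neg_hi -= (lo != 0) ? 1u : 0u;   // SBB hi,0 -- propagate the borrow out of the low word
       //   // (neg_hi:neg_lo) == -(hi:lo)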
2893 
2894   enc_class enc_pop_rdx() %{
2895     emit_opcode(cbuf,0x5A);
2896   %}
2897 
2898   enc_class enc_rethrow() %{
2899     cbuf.set_insts_mark();
2900     emit_opcode(cbuf, 0xE9);        // jmp    entry
2901     emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.insts_end())-4,
2902                    runtime_call_Relocation::spec(), RELOC_IMM32 );
2903   %}
2904 
2905 
2906   // Convert a double to an int.  Java semantics require we do complex
2907   // manglelations in the corner cases.  So we set the rounding mode to
2908   // 'zero' (truncate), store the darned double down as an int, and reset
2909   // the rounding mode to 'nearest'.  If the stored value is the integer-
2910   // indefinite pattern (0x80000000), a runtime stub fixes up the corner cases.
2911   enc_class DPR2I_encoding( regDPR src ) %{
2912     // Flip to round-to-zero mode.  We attempted to allow invalid-op
2913     // exceptions here, so that a NaN or other corner-case value will
2914     // throw an exception (but normal values get converted at full speed).
2915     // However, I2C adapters and other float-stack manglers leave pending
2916     // invalid-op exceptions hanging.  We would have to clear them before
2917     // enabling them and that is more expensive than just testing for the
2918     // invalid value Intel stores down in the corner cases.
2919     emit_opcode(cbuf,0xD9);            // FLDCW  trunc
2920     emit_opcode(cbuf,0x2D);
2921     emit_d32(cbuf,(int)StubRoutines::addr_fpu_cntrl_wrd_trunc());
2922     // Allocate a word
2923     emit_opcode(cbuf,0x83);            // SUB ESP,4
2924     emit_opcode(cbuf,0xEC);
2925     emit_d8(cbuf,0x04);
2926     // Encoding assumes a double has been pushed into FPR0.
2927     // Store down the double as an int, popping the FPU stack
2928     emit_opcode(cbuf,0xDB);            // FISTP [ESP]
2929     emit_opcode(cbuf,0x1C);
2930     emit_d8(cbuf,0x24);
2931     // Restore the rounding mode; mask the exception
2932     emit_opcode(cbuf,0xD9);            // FLDCW   std/24-bit mode
2933     emit_opcode(cbuf,0x2D);
2934     emit_d32( cbuf, Compile::current()->in_24_bit_fp_mode()
2935         ? (int)StubRoutines::addr_fpu_cntrl_wrd_24()
2936         : (int)StubRoutines::addr_fpu_cntrl_wrd_std());
2937 
2938     // Load the converted int; adjust CPU stack
2939     emit_opcode(cbuf,0x58);       // POP EAX
2940     emit_opcode(cbuf,0x3D);       // CMP EAX,imm
2941     emit_d32   (cbuf,0x80000000); //         0x80000000
2942     emit_opcode(cbuf,0x75);       // JNE around_slow_call
2943     emit_d8    (cbuf,0x07);       // Size of slow_call
2944     // Push src onto stack slow-path
2945     emit_opcode(cbuf,0xD9 );      // FLD     ST(i)
2946     emit_d8    (cbuf,0xC0-1+$src$$reg );
2947     // CALL directly to the runtime
2948     cbuf.set_insts_mark();
2949     emit_opcode(cbuf,0xE8);       // Call into runtime
2950     emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
2951     // Carry on here...
2952   %}
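
       // Illustrative sketch (not emitted code): with round-to-zero and exceptions
       // masked, FISTP stores 0x80000000 (the "integer indefinite" value) for any
       // input it cannot represent, so only then is the slow call taken (a genuine
       // result of -2^31 also takes it and is simply returned unchanged).  The
       // runtime wrapper is assumed to implement the Java (int) cast rules, roughly:
       //
       //   int32_t d2i_slow(double d) {                    // needs <stdint.h>
       //     if (d != d)              return 0;            // NaN -> 0
       //     if (d >=  2147483647.0)  return INT32_MAX;    // too large -> Integer.MAX_VALUE
       //     if (d <= -2147483648.0)  return INT32_MIN;    // too small -> Integer.MIN_VALUE
       //     return (int32_t)d;                            // in range: truncate
       //   }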
2953 
2954   enc_class DPR2L_encoding( regDPR src ) %{
2955     emit_opcode(cbuf,0xD9);            // FLDCW  trunc
2956     emit_opcode(cbuf,0x2D);
2957     emit_d32(cbuf,(int)StubRoutines::addr_fpu_cntrl_wrd_trunc());
2958     // Allocate two words (8 bytes)
2959     emit_opcode(cbuf,0x83);            // SUB ESP,8
2960     emit_opcode(cbuf,0xEC);
2961     emit_d8(cbuf,0x08);
2962     // Encoding assumes a double has been pushed into FPR0.
2963     // Store down the double as a long, popping the FPU stack
2964     emit_opcode(cbuf,0xDF);            // FISTP [ESP]
2965     emit_opcode(cbuf,0x3C);
2966     emit_d8(cbuf,0x24);
2967     // Restore the rounding mode; mask the exception
2968     emit_opcode(cbuf,0xD9);            // FLDCW   std/24-bit mode
2969     emit_opcode(cbuf,0x2D);
2970     emit_d32( cbuf, Compile::current()->in_24_bit_fp_mode()
2971         ? (int)StubRoutines::addr_fpu_cntrl_wrd_24()
2972         : (int)StubRoutines::addr_fpu_cntrl_wrd_std());
2973 
2974     // Load the converted long; adjust CPU stack
2975     emit_opcode(cbuf,0x58);       // POP EAX
2976     emit_opcode(cbuf,0x5A);       // POP EDX
2977     emit_opcode(cbuf,0x81);       // CMP EDX,imm
2978     emit_d8    (cbuf,0xFA);       // rdx
2979     emit_d32   (cbuf,0x80000000); //         0x80000000
2980     emit_opcode(cbuf,0x75);       // JNE around_slow_call
2981     emit_d8    (cbuf,0x07+4);     // Size of slow_call
2982     emit_opcode(cbuf,0x85);       // TEST EAX,EAX
2983     emit_opcode(cbuf,0xC0);       // ModRM byte: EAX,EAX
2984     emit_opcode(cbuf,0x75);       // JNE around_slow_call
2985     emit_d8    (cbuf,0x07);       // Size of slow_call
2986     // Push src onto stack slow-path
2987     emit_opcode(cbuf,0xD9 );      // FLD     ST(i)
2988     emit_d8    (cbuf,0xC0-1+$src$$reg );
2989     // CALL directly to the runtime
2990     cbuf.set_insts_mark();
2991     emit_opcode(cbuf,0xE8);       // Call into runtime
2992     emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
2993     // Carry on here...
2994   %}
2995 
2996   enc_class FMul_ST_reg( eRegFPR src1 ) %{
2997     // Operand was loaded from memory into fp ST (stack top)
2998     // FMUL   ST,$src  /* D8 C8+i */
2999     emit_opcode(cbuf, 0xD8);
3000     emit_opcode(cbuf, 0xC8 + $src1$$reg);
3001   %}
3002 
3003   enc_class FAdd_ST_reg( eRegFPR src2 ) %{
3004     // FADD   ST,src2  /* D8 C0+i */
3005     emit_opcode(cbuf, 0xD8);
3006     emit_opcode(cbuf, 0xC0 + $src2$$reg);
3007     // could use FADDP  src2,ST  /* DE C0+i */ instead
3008   %}
3009 
3010   enc_class FAddP_reg_ST( eRegFPR src2 ) %{
3011     // FADDP  src2,ST  /* DE C0+i */
3012     emit_opcode(cbuf, 0xDE);
3013     emit_opcode(cbuf, 0xC0 + $src2$$reg);
3014   %}
3015 
3016   enc_class subFPR_divFPR_encode( eRegFPR src1, eRegFPR src2) %{
3017     // Operand has been loaded into fp ST (stack top)
3018       // FSUB   ST,$src1
3019       emit_opcode(cbuf, 0xD8);
3020       emit_opcode(cbuf, 0xE0 + $src1$$reg);
3021 
3022       // FDIV
3023       emit_opcode(cbuf, 0xD8);
3024       emit_opcode(cbuf, 0xF0 + $src2$$reg);
3025   %}
3026 
3027   enc_class MulFAddF (eRegFPR src1, eRegFPR src2) %{
3028     // Operand was loaded from memory into fp ST (stack top)
3029     // FADD   ST,$src  /* D8 C0+i */
3030     emit_opcode(cbuf, 0xD8);
3031     emit_opcode(cbuf, 0xC0 + $src1$$reg);
3032 
3033     // FMUL   ST,src2  /* D8 C8+i */
3034     emit_opcode(cbuf, 0xD8);
3035     emit_opcode(cbuf, 0xC8 + $src2$$reg);
3036   %}
3037 
3038 
3039   enc_class MulFAddFreverse (eRegFPR src1, eRegFPR src2) %{
3040     // Operand was loaded from memory into fp ST (stack top)
3041     // FADD   ST,$src  /* D8 C0+i */
3042     emit_opcode(cbuf, 0xD8);
3043     emit_opcode(cbuf, 0xC0 + $src1$$reg);
3044 
3045     // FMULP  src2,ST  /* DE C8+i */
3046     emit_opcode(cbuf, 0xDE);
3047     emit_opcode(cbuf, 0xC8 + $src2$$reg);
3048   %}
3049 
3050   // Atomically load the volatile long
3051   enc_class enc_loadL_volatile( memory mem, stackSlotL dst ) %{
3052     emit_opcode(cbuf,0xDF);
3053     int rm_byte_opcode = 0x05;
3054     int base     = $mem$$base;
3055     int index    = $mem$$index;
3056     int scale    = $mem$$scale;
3057     int displace = $mem$$disp;
3058     relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
3059     encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
3060     store_to_stackslot( cbuf, 0x0DF, 0x07, $dst$$disp );
3061   %}
3062 
3063   // Volatile Store Long.  Must be atomic, so move it into
3064   // the FP TOS and then do a 64-bit FIST.  Has to probe the
3065   // target address before the store (for null-ptr checks)
3066   // so the memory operand is used twice in the encoding.
3067   enc_class enc_storeL_volatile( memory mem, stackSlotL src ) %{
3068     store_to_stackslot( cbuf, 0x0DF, 0x05, $src$$disp );
3069     cbuf.set_insts_mark();            // Mark start of FIST in case $mem has an oop
3070     emit_opcode(cbuf,0xDF);
3071     int rm_byte_opcode = 0x07;
3072     int base     = $mem$$base;
3073     int index    = $mem$$index;
3074     int scale    = $mem$$scale;
3075     int displace = $mem$$disp;
3076     relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
3077     encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
3078   %}
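
       // Illustrative sketch (not emitted code): both volatile-long encodings above go
       // through the x87 unit because FILD/FISTP touch memory with a single 64-bit
       // access, which a pair of 32-bit integer moves cannot guarantee.  Conceptually:
       //
       //   volatile int64_t *p = (volatile int64_t *)addr;
       //   int64_t v = *p;   // enc_loadL_volatile:  one 64-bit read  (FILD)
       //   *p = v;           // enc_storeL_volatile: one 64-bit write (FISTP)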
3079 
3080   // Safepoint Poll.  This polls the safepoint page, and causes an
3081   // exception if it is not readable. Unfortunately, it kills the condition code
3082   // in the process.
3083   // We currently use TESTL [spp],EDI
3084   // A better choice might be TESTB [spp + pagesize() - CacheLineSize()],0
3085 
3086   enc_class Safepoint_Poll() %{
3087     cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0);
3088     emit_opcode(cbuf,0x85);
3089     emit_rm (cbuf, 0x0, 0x7, 0x5);
3090     emit_d32(cbuf, (intptr_t)os::get_polling_page());
3091   %}
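
       // Illustrative sketch (not emitted code): the poll is simply a read of a page the
       // VM controls.  To request a safepoint the VM protects the page; the TEST then
       // faults and the signal handler parks the thread at the safepoint.  Roughly:
       //
       //   volatile int *poll = (volatile int *)os::get_polling_page();
       //   (void)*poll;   // value is irrelevant -- only the access (and possible fault) matters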
3092 %}
3093 
3094 
3095 //----------FRAME--------------------------------------------------------------
3096 // Definition of frame structure and management information.
3097 //
3098 //  S T A C K   L A Y O U T    Allocators stack-slot number
3099 //                             |   (to get allocators register number
3100 //  G  Owned by    |        |  v    add OptoReg::stack0())
3101 //  r   CALLER     |        |
3102 //  o     |        +--------+      pad to even-align allocators stack-slot
3103 //  w     V        |  pad0  |        numbers; owned by CALLER
3104 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3105 //  h     ^        |   in   |  5
3106 //        |        |  args  |  4   Holes in incoming args owned by SELF
3107 //  |     |        |        |  3
3108 //  |     |        +--------+
3109 //  V     |        | old out|      Empty on Intel, window on Sparc
3110 //        |    old |preserve|      Must be even aligned.
3111 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3112 //        |        |   in   |  3   area for Intel ret address
3113 //     Owned by    |preserve|      Empty on Sparc.
3114 //       SELF      +--------+
3115 //        |        |  pad2  |  2   pad to align old SP
3116 //        |        +--------+  1
3117 //        |        | locks  |  0
3118 //        |        +--------+----> OptoReg::stack0(), even aligned
3119 //        |        |  pad1  | 11   pad to align new SP
3120 //        |        +--------+
3121 //        |        |        | 10
3122 //        |        | spills |  9   spills
3123 //        V        |        |  8   (pad0 slot for callee)
3124 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3125 //        ^        |  out   |  7
3126 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3127 //     Owned by    +--------+
3128 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3129 //        |    new |preserve|      Must be even-aligned.
3130 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3131 //        |        |        |
3132 //
3133 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3134 //         known from SELF's arguments and the Java calling convention.
3135 //         Region 6-7 is determined per call site.
3136 // Note 2: If the calling convention leaves holes in the incoming argument
3137 //         area, those holes are owned by SELF.  Holes in the outgoing area
3138 //         are owned by the CALLEE.  Holes should not be necessary in the
3139 //         incoming area, as the Java calling convention is completely under
3140 //         the control of the AD file.  Doubles can be sorted and packed to
3141 //         avoid holes.  Holes in the outgoing arguments may be necessary for
3142 //         varargs C calling conventions.
3143 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3144 //         even aligned with pad0 as needed.
3145 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3146 //         region 6-11 is even aligned; it may be padded out more so that
3147 //         the region from SP to FP meets the minimum stack alignment.
3148 
3149 frame %{
3150   // What direction does stack grow in (assumed to be same for C & Java)
3151   stack_direction(TOWARDS_LOW);
3152 
3153   // These three registers define part of the calling convention
3154   // between compiled code and the interpreter.
3155   inline_cache_reg(EAX);                // Inline Cache Register
3156   interpreter_method_oop_reg(EBX);      // Method Oop Register when calling interpreter
3157 
3158   // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3159   cisc_spilling_operand_name(indOffset32);
3160 
3161   // Number of stack slots consumed by locking an object
3162   sync_stack_slots(1);
3163 
3164   // Compiled code's Frame Pointer
3165   frame_pointer(ESP);
3166   // Interpreter stores its frame pointer in a register which is
3167   // stored to the stack by I2CAdaptors.
3168   // I2CAdaptors convert from interpreted Java to compiled Java.
3169   interpreter_frame_pointer(EBP);
3170 
3171   // Stack alignment requirement
3172   // Alignment size in bytes (128-bit -> 16 bytes)
3173   stack_alignment(StackAlignmentInBytes);
3174 
3175   // Number of stack slots between incoming argument block and the start of
3176   // a new frame.  The PROLOG must add this many slots to the stack.  The
3177   // EPILOG must remove this many slots.  Intel needs one slot for
3178   // the return address and one for rbp (must save rbp).
3179   in_preserve_stack_slots(2+VerifyStackAtCalls);
3180 
3181   // Number of outgoing stack slots killed above the out_preserve_stack_slots
3182   // for calls to C.  Supports the var-args backing area for register parms.
3183   varargs_C_out_slots_killed(0);
3184 
3185   // The after-PROLOG location of the return address.  Location of
3186   // return address specifies a type (REG or STACK) and a number
3187   // representing the register number (i.e. - use a register name) or
3188   // stack slot.
3189   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3190   // Otherwise, it is above the locks and verification slot and alignment word
3191   return_addr(STACK - 1 +
3192               round_to((Compile::current()->in_preserve_stack_slots() +
3193                         Compile::current()->fixed_slots()),
3194                        stack_alignment_in_slots()));
3195 
3196   // Body of function which returns an integer array locating
3197   // arguments either in registers or in stack slots.  Passed an array
3198   // of ideal registers called "sig" and a "length" count.  Stack-slot
3199   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3200   // arguments for a CALLEE.  Incoming stack arguments are
3201   // automatically biased by the preserve_stack_slots field above.
3202   calling_convention %{
3203     // No difference between incoming/outgoing; just pass false
3204     SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3205   %}
3206 
3207 
3208   // Body of function which returns an integer array locating
3209   // arguments either in registers or in stack slots.  Passed an array
3210   // of ideal registers called "sig" and a "length" count.  Stack-slot
3211   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3212   // arguments for a CALLEE.  Incoming stack arguments are
3213   // automatically biased by the preserve_stack_slots field above.
3214   c_calling_convention %{
3215     // This is obviously always outgoing
3216     (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3217   %}
3218 
3219   // Location of C & interpreter return values
3220   c_return_value %{
3221     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3222     static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num,      EAX_num,      FPR1L_num,    FPR1L_num, EAX_num };
3223     static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
3224 
3225     // in SSE2+ mode we want to keep the FPU stack clean so pretend
3226     // that C functions return float and double results in XMM0.
3227     if( ideal_reg == Op_RegD && UseSSE>=2 )
3228       return OptoRegPair(XMM0b_num,XMM0_num);
3229     if( ideal_reg == Op_RegF && UseSSE>=2 )
3230       return OptoRegPair(OptoReg::Bad,XMM0_num);
3231 
3232     return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3233   %}
3234 
3235   // Location of return values
3236   return_value %{
3237     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3238     static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num,      EAX_num,      FPR1L_num,    FPR1L_num, EAX_num };
3239     static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
3240     if( ideal_reg == Op_RegD && UseSSE>=2 )
3241       return OptoRegPair(XMM0b_num,XMM0_num);
3242     if( ideal_reg == Op_RegF && UseSSE>=1 )
3243       return OptoRegPair(OptoReg::Bad,XMM0_num);
3244     return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3245   %}
3246 
3247 %}
3248 
3249 //----------ATTRIBUTES---------------------------------------------------------
3250 //----------Operand Attributes-------------------------------------------------
3251 op_attrib op_cost(0);        // Required cost attribute
3252 
3253 //----------Instruction Attributes---------------------------------------------
3254 ins_attrib ins_cost(100);       // Required cost attribute
3255 ins_attrib ins_size(8);         // Required size attribute (in bits)
3256 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3257                                 // non-matching short branch variant of some
3258                                 // long branch?
3259 ins_attrib ins_alignment(1);    // Required alignment attribute (must be a power of 2)
3260                                 // specifies the alignment that some part of the instruction (not
3261                                 // necessarily the start) requires.  If > 1, a compute_padding()
3262                                 // function must be provided for the instruction
3263 
3264 //----------OPERANDS-----------------------------------------------------------
3265 // Operand definitions must precede instruction definitions for correct parsing
3266 // in the ADLC because operands constitute user defined types which are used in
3267 // instruction definitions.
3268 
3269 //----------Simple Operands----------------------------------------------------
3270 // Immediate Operands
3271 // Integer Immediate
3272 operand immI() %{
3273   match(ConI);
3274 
3275   op_cost(10);
3276   format %{ %}
3277   interface(CONST_INTER);
3278 %}
3279 
3280 // Constant for test vs zero
3281 operand immI0() %{
3282   predicate(n->get_int() == 0);
3283   match(ConI);
3284 
3285   op_cost(0);
3286   format %{ %}
3287   interface(CONST_INTER);
3288 %}
3289 
3290 // Constant for increment
3291 operand immI1() %{
3292   predicate(n->get_int() == 1);
3293   match(ConI);
3294 
3295   op_cost(0);
3296   format %{ %}
3297   interface(CONST_INTER);
3298 %}
3299 
3300 // Constant for decrement
3301 operand immI_M1() %{
3302   predicate(n->get_int() == -1);
3303   match(ConI);
3304 
3305   op_cost(0);
3306   format %{ %}
3307   interface(CONST_INTER);
3308 %}
3309 
3310 // Valid scale values for addressing modes
3311 operand immI2() %{
3312   predicate(0 <= n->get_int() && (n->get_int() <= 3));
3313   match(ConI);
3314 
3315   format %{ %}
3316   interface(CONST_INTER);
3317 %}
3318 
3319 operand immI8() %{
3320   predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3321   match(ConI);
3322 
3323   op_cost(5);
3324   format %{ %}
3325   interface(CONST_INTER);
3326 %}
3327 
3328 operand immI16() %{
3329   predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3330   match(ConI);
3331 
3332   op_cost(10);
3333   format %{ %}
3334   interface(CONST_INTER);
3335 %}
3336 
3337 // Int Immediate non-negative
3338 operand immU31()
3339 %{
3340   predicate(n->get_int() >= 0);
3341   match(ConI);
3342 
3343   op_cost(0);
3344   format %{ %}
3345   interface(CONST_INTER);
3346 %}
3347 
3348 // Constant for long shifts
3349 operand immI_32() %{
3350   predicate( n->get_int() == 32 );
3351   match(ConI);
3352 
3353   op_cost(0);
3354   format %{ %}
3355   interface(CONST_INTER);
3356 %}
3357 
3358 operand immI_1_31() %{
3359   predicate( n->get_int() >= 1 && n->get_int() <= 31 );
3360   match(ConI);
3361 
3362   op_cost(0);
3363   format %{ %}
3364   interface(CONST_INTER);
3365 %}
3366 
3367 operand immI_32_63() %{
3368   predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3369   match(ConI);
3370   op_cost(0);
3371 
3372   format %{ %}
3373   interface(CONST_INTER);
3374 %}
3375 
3376 operand immI_1() %{
3377   predicate( n->get_int() == 1 );
3378   match(ConI);
3379 
3380   op_cost(0);
3381   format %{ %}
3382   interface(CONST_INTER);
3383 %}
3384 
3385 operand immI_2() %{
3386   predicate( n->get_int() == 2 );
3387   match(ConI);
3388 
3389   op_cost(0);
3390   format %{ %}
3391   interface(CONST_INTER);
3392 %}
3393 
3394 operand immI_3() %{
3395   predicate( n->get_int() == 3 );
3396   match(ConI);
3397 
3398   op_cost(0);
3399   format %{ %}
3400   interface(CONST_INTER);
3401 %}
3402 
3403 // Pointer Immediate
3404 operand immP() %{
3405   match(ConP);
3406 
3407   op_cost(10);
3408   format %{ %}
3409   interface(CONST_INTER);
3410 %}
3411 
3412 // NULL Pointer Immediate
3413 operand immP0() %{
3414   predicate( n->get_ptr() == 0 );
3415   match(ConP);
3416   op_cost(0);
3417 
3418   format %{ %}
3419   interface(CONST_INTER);
3420 %}
3421 
3422 // Long Immediate
3423 operand immL() %{
3424   match(ConL);
3425 
3426   op_cost(20);
3427   format %{ %}
3428   interface(CONST_INTER);
3429 %}
3430 
3431 // Long Immediate zero
3432 operand immL0() %{
3433   predicate( n->get_long() == 0L );
3434   match(ConL);
3435   op_cost(0);
3436 
3437   format %{ %}
3438   interface(CONST_INTER);
3439 %}
3440 
3441 // Long Immediate minus one
3442 operand immL_M1() %{
3443   predicate( n->get_long() == -1L );
3444   match(ConL);
3445   op_cost(0);
3446 
3447   format %{ %}
3448   interface(CONST_INTER);
3449 %}
3450 
3451 // Long immediate from 0 to 127.
3452 // Used for a shorter form of long mul by 10.
3453 operand immL_127() %{
3454   predicate((0 <= n->get_long()) && (n->get_long() <= 127));
3455   match(ConL);
3456   op_cost(0);
3457 
3458   format %{ %}
3459   interface(CONST_INTER);
3460 %}
3461 
3462 // Long Immediate: low 32-bit mask
3463 operand immL_32bits() %{
3464   predicate(n->get_long() == 0xFFFFFFFFL);
3465   match(ConL);
3466   op_cost(0);
3467 
3468   format %{ %}
3469   interface(CONST_INTER);
3470 %}
3471 
3472 // Long Immediate: 32-bit signed value (fits in an int)
3473 operand immL32() %{
3474   predicate(n->get_long() == (int)(n->get_long()));
3475   match(ConL);
3476   op_cost(20);
3477 
3478   format %{ %}
3479   interface(CONST_INTER);
3480 %}
3481 
3482 //Double Immediate zero
3483 operand immDPR0() %{
3484   // Do additional (and counter-intuitive) test against NaN to work around VC++
3485   // bug that generates code such that NaNs compare equal to 0.0
3486   predicate( UseSSE<=1 && n->getd() == 0.0 && !g_isnan(n->getd()) );
3487   match(ConD);
3488 
3489   op_cost(5);
3490   format %{ %}
3491   interface(CONST_INTER);
3492 %}
3493 
3494 // Double Immediate one
3495 operand immDPR1() %{
3496   predicate( UseSSE<=1 && n->getd() == 1.0 );
3497   match(ConD);
3498 
3499   op_cost(5);
3500   format %{ %}
3501   interface(CONST_INTER);
3502 %}
3503 
3504 // Double Immediate
3505 operand immDPR() %{
3506   predicate(UseSSE<=1);
3507   match(ConD);
3508 
3509   op_cost(5);
3510   format %{ %}
3511   interface(CONST_INTER);
3512 %}
3513 
3514 operand immD() %{
3515   predicate(UseSSE>=2);
3516   match(ConD);
3517 
3518   op_cost(5);
3519   format %{ %}
3520   interface(CONST_INTER);
3521 %}
3522 
3523 // Double Immediate zero
3524 operand immD0() %{
3525   // Do additional (and counter-intuitive) test against NaN to work around VC++
3526   // bug that generates code such that NaNs compare equal to 0.0 AND do not
3527   // compare equal to -0.0.
3528   predicate( UseSSE>=2 && jlong_cast(n->getd()) == 0 );
3529   match(ConD);
3530 
3531   format %{ %}
3532   interface(CONST_INTER);
3533 %}
3534 
3535 // Float Immediate zero
3536 operand immFPR0() %{
3537   predicate(UseSSE == 0 && n->getf() == 0.0F);
3538   match(ConF);
3539 
3540   op_cost(5);
3541   format %{ %}
3542   interface(CONST_INTER);
3543 %}
3544 
3545 // Float Immediate one
3546 operand immFPR1() %{
3547   predicate(UseSSE == 0 && n->getf() == 1.0F);
3548   match(ConF);
3549 
3550   op_cost(5);
3551   format %{ %}
3552   interface(CONST_INTER);
3553 %}
3554 
3555 // Float Immediate
3556 operand immFPR() %{
3557   predicate( UseSSE == 0 );
3558   match(ConF);
3559 
3560   op_cost(5);
3561   format %{ %}
3562   interface(CONST_INTER);
3563 %}
3564 
3565 // Float Immediate
3566 operand immF() %{
3567   predicate(UseSSE >= 1);
3568   match(ConF);
3569 
3570   op_cost(5);
3571   format %{ %}
3572   interface(CONST_INTER);
3573 %}
3574 
3575 // Float Immediate zero.  Zero and not -0.0
3576 operand immF0() %{
3577   predicate( UseSSE >= 1 && jint_cast(n->getf()) == 0 );
3578   match(ConF);
3579 
3580   op_cost(5);
3581   format %{ %}
3582   interface(CONST_INTER);
3583 %}
3584 
3585 // Immediates for special shifts (sign extend)
3586 
3587 // Constants for increment
3588 operand immI_16() %{
3589   predicate( n->get_int() == 16 );
3590   match(ConI);
3591 
3592   format %{ %}
3593   interface(CONST_INTER);
3594 %}
3595 
3596 operand immI_24() %{
3597   predicate( n->get_int() == 24 );
3598   match(ConI);
3599 
3600   format %{ %}
3601   interface(CONST_INTER);
3602 %}
3603 
3604 // Constant for byte-wide masking
3605 operand immI_255() %{
3606   predicate( n->get_int() == 255 );
3607   match(ConI);
3608 
3609   format %{ %}
3610   interface(CONST_INTER);
3611 %}
3612 
3613 // Constant for short-wide masking
3614 operand immI_65535() %{
3615   predicate(n->get_int() == 65535);
3616   match(ConI);
3617 
3618   format %{ %}
3619   interface(CONST_INTER);
3620 %}
3621 
3622 // Register Operands
3623 // Integer Register
3624 operand rRegI() %{
3625   constraint(ALLOC_IN_RC(int_reg));
3626   match(RegI);
3627   match(xRegI);
3628   match(eAXRegI);
3629   match(eBXRegI);
3630   match(eCXRegI);
3631   match(eDXRegI);
3632   match(eDIRegI);
3633   match(eSIRegI);
3634 
3635   format %{ %}
3636   interface(REG_INTER);
3637 %}
3638 
3639 // Subset of Integer Register
3640 operand xRegI(rRegI reg) %{
3641   constraint(ALLOC_IN_RC(int_x_reg));
3642   match(reg);
3643   match(eAXRegI);
3644   match(eBXRegI);
3645   match(eCXRegI);
3646   match(eDXRegI);
3647 
3648   format %{ %}
3649   interface(REG_INTER);
3650 %}
3651 
3652 // Special Registers
3653 operand eAXRegI(xRegI reg) %{
3654   constraint(ALLOC_IN_RC(eax_reg));
3655   match(reg);
3656   match(rRegI);
3657 
3658   format %{ "EAX" %}
3659   interface(REG_INTER);
3660 %}
3661 
3662 // Special Registers
3663 operand eBXRegI(xRegI reg) %{
3664   constraint(ALLOC_IN_RC(ebx_reg));
3665   match(reg);
3666   match(rRegI);
3667 
3668   format %{ "EBX" %}
3669   interface(REG_INTER);
3670 %}
3671 
3672 operand eCXRegI(xRegI reg) %{
3673   constraint(ALLOC_IN_RC(ecx_reg));
3674   match(reg);
3675   match(rRegI);
3676 
3677   format %{ "ECX" %}
3678   interface(REG_INTER);
3679 %}
3680 
3681 operand eDXRegI(xRegI reg) %{
3682   constraint(ALLOC_IN_RC(edx_reg));
3683   match(reg);
3684   match(rRegI);
3685 
3686   format %{ "EDX" %}
3687   interface(REG_INTER);
3688 %}
3689 
3690 operand eDIRegI(xRegI reg) %{
3691   constraint(ALLOC_IN_RC(edi_reg));
3692   match(reg);
3693   match(rRegI);
3694 
3695   format %{ "EDI" %}
3696   interface(REG_INTER);
3697 %}
3698 
3699 operand naxRegI() %{
3700   constraint(ALLOC_IN_RC(nax_reg));
3701   match(RegI);
3702   match(eCXRegI);
3703   match(eDXRegI);
3704   match(eSIRegI);
3705   match(eDIRegI);
3706 
3707   format %{ %}
3708   interface(REG_INTER);
3709 %}
3710 
3711 operand nadxRegI() %{
3712   constraint(ALLOC_IN_RC(nadx_reg));
3713   match(RegI);
3714   match(eBXRegI);
3715   match(eCXRegI);
3716   match(eSIRegI);
3717   match(eDIRegI);
3718 
3719   format %{ %}
3720   interface(REG_INTER);
3721 %}
3722 
3723 operand ncxRegI() %{
3724   constraint(ALLOC_IN_RC(ncx_reg));
3725   match(RegI);
3726   match(eAXRegI);
3727   match(eDXRegI);
3728   match(eSIRegI);
3729   match(eDIRegI);
3730 
3731   format %{ %}
3732   interface(REG_INTER);
3733 %}
3734 
3735 // // This operand was used by cmpFastUnlock, but conflicted with 'object' reg
3736 // //
3737 operand eSIRegI(xRegI reg) %{
3738    constraint(ALLOC_IN_RC(esi_reg));
3739    match(reg);
3740    match(rRegI);
3741 
3742    format %{ "ESI" %}
3743    interface(REG_INTER);
3744 %}
3745 
3746 // Pointer Register
3747 operand anyRegP() %{
3748   constraint(ALLOC_IN_RC(any_reg));
3749   match(RegP);
3750   match(eAXRegP);
3751   match(eBXRegP);
3752   match(eCXRegP);
3753   match(eDIRegP);
3754   match(eRegP);
3755 
3756   format %{ %}
3757   interface(REG_INTER);
3758 %}
3759 
3760 operand eRegP() %{
3761   constraint(ALLOC_IN_RC(int_reg));
3762   match(RegP);
3763   match(eAXRegP);
3764   match(eBXRegP);
3765   match(eCXRegP);
3766   match(eDIRegP);
3767 
3768   format %{ %}
3769   interface(REG_INTER);
3770 %}
3771 
3772 // On Windows 95, EBP is not safe to use for implicit null tests.
3773 operand eRegP_no_EBP() %{
3774   constraint(ALLOC_IN_RC(int_reg_no_ebp));
3775   match(RegP);
3776   match(eAXRegP);
3777   match(eBXRegP);
3778   match(eCXRegP);
3779   match(eDIRegP);
3780 
3781   op_cost(100);
3782   format %{ %}
3783   interface(REG_INTER);
3784 %}
3785 
3786 operand naxRegP() %{
3787   constraint(ALLOC_IN_RC(nax_reg));
3788   match(RegP);
3789   match(eBXRegP);
3790   match(eDXRegP);
3791   match(eCXRegP);
3792   match(eSIRegP);
3793   match(eDIRegP);
3794 
3795   format %{ %}
3796   interface(REG_INTER);
3797 %}
3798 
3799 operand nabxRegP() %{
3800   constraint(ALLOC_IN_RC(nabx_reg));
3801   match(RegP);
3802   match(eCXRegP);
3803   match(eDXRegP);
3804   match(eSIRegP);
3805   match(eDIRegP);
3806 
3807   format %{ %}
3808   interface(REG_INTER);
3809 %}
3810 
3811 operand pRegP() %{
3812   constraint(ALLOC_IN_RC(p_reg));
3813   match(RegP);
3814   match(eBXRegP);
3815   match(eDXRegP);
3816   match(eSIRegP);
3817   match(eDIRegP);
3818 
3819   format %{ %}
3820   interface(REG_INTER);
3821 %}
3822 
3823 // Special Registers
3824 // Return a pointer value
3825 operand eAXRegP(eRegP reg) %{
3826   constraint(ALLOC_IN_RC(eax_reg));
3827   match(reg);
3828   format %{ "EAX" %}
3829   interface(REG_INTER);
3830 %}
3831 
3832 // Used in AtomicAdd
3833 operand eBXRegP(eRegP reg) %{
3834   constraint(ALLOC_IN_RC(ebx_reg));
3835   match(reg);
3836   format %{ "EBX" %}
3837   interface(REG_INTER);
3838 %}
3839 
3840 // Tail-call (interprocedural jump) to interpreter
3841 operand eCXRegP(eRegP reg) %{
3842   constraint(ALLOC_IN_RC(ecx_reg));
3843   match(reg);
3844   format %{ "ECX" %}
3845   interface(REG_INTER);
3846 %}
3847 
3848 operand eSIRegP(eRegP reg) %{
3849   constraint(ALLOC_IN_RC(esi_reg));
3850   match(reg);
3851   format %{ "ESI" %}
3852   interface(REG_INTER);
3853 %}
3854 
3855 // Used in rep stosw
3856 operand eDIRegP(eRegP reg) %{
3857   constraint(ALLOC_IN_RC(edi_reg));
3858   match(reg);
3859   format %{ "EDI" %}
3860   interface(REG_INTER);
3861 %}
3862 
3863 operand eRegL() %{
3864   constraint(ALLOC_IN_RC(long_reg));
3865   match(RegL);
3866   match(eADXRegL);
3867 
3868   format %{ %}
3869   interface(REG_INTER);
3870 %}
3871 
3872 operand eADXRegL( eRegL reg ) %{
3873   constraint(ALLOC_IN_RC(eadx_reg));
3874   match(reg);
3875 
3876   format %{ "EDX:EAX" %}
3877   interface(REG_INTER);
3878 %}
3879 
3880 operand eBCXRegL( eRegL reg ) %{
3881   constraint(ALLOC_IN_RC(ebcx_reg));
3882   match(reg);
3883 
3884   format %{ "EBX:ECX" %}
3885   interface(REG_INTER);
3886 %}
3887 
3888 // Special case for integer high multiply
3889 operand eADXRegL_low_only() %{
3890   constraint(ALLOC_IN_RC(eadx_reg));
3891   match(RegL);
3892 
3893   format %{ "EAX" %}
3894   interface(REG_INTER);
3895 %}
3896 
3897 // Flags register, used as output of compare instructions
3898 operand eFlagsReg() %{
3899   constraint(ALLOC_IN_RC(int_flags));
3900   match(RegFlags);
3901 
3902   format %{ "EFLAGS" %}
3903   interface(REG_INTER);
3904 %}
3905 
3906 // Flags register, used as output of FLOATING POINT compare instructions
3907 operand eFlagsRegU() %{
3908   constraint(ALLOC_IN_RC(int_flags));
3909   match(RegFlags);
3910 
3911   format %{ "EFLAGS_U" %}
3912   interface(REG_INTER);
3913 %}
3914 
3915 operand eFlagsRegUCF() %{
3916   constraint(ALLOC_IN_RC(int_flags));
3917   match(RegFlags);
3918   predicate(false);
3919 
3920   format %{ "EFLAGS_U_CF" %}
3921   interface(REG_INTER);
3922 %}
3923 
3924 // Condition Code Register used by long compare
3925 operand flagsReg_long_LTGE() %{
3926   constraint(ALLOC_IN_RC(int_flags));
3927   match(RegFlags);
3928   format %{ "FLAGS_LTGE" %}
3929   interface(REG_INTER);
3930 %}
3931 operand flagsReg_long_EQNE() %{
3932   constraint(ALLOC_IN_RC(int_flags));
3933   match(RegFlags);
3934   format %{ "FLAGS_EQNE" %}
3935   interface(REG_INTER);
3936 %}
3937 operand flagsReg_long_LEGT() %{
3938   constraint(ALLOC_IN_RC(int_flags));
3939   match(RegFlags);
3940   format %{ "FLAGS_LEGT" %}
3941   interface(REG_INTER);
3942 %}
3943 
3944 // Float register operands
3945 operand regDPR() %{
3946   predicate( UseSSE < 2 );
3947   constraint(ALLOC_IN_RC(fp_dbl_reg));
3948   match(RegD);
3949   match(regDPR1);
3950   match(regDPR2);
3951   format %{ %}
3952   interface(REG_INTER);
3953 %}
3954 
3955 operand regDPR1(regDPR reg) %{
3956   predicate( UseSSE < 2 );
3957   constraint(ALLOC_IN_RC(fp_dbl_reg0));
3958   match(reg);
3959   format %{ "FPR1" %}
3960   interface(REG_INTER);
3961 %}
3962 
3963 operand regDPR2(regDPR reg) %{
3964   predicate( UseSSE < 2 );
3965   constraint(ALLOC_IN_RC(fp_dbl_reg1));
3966   match(reg);
3967   format %{ "FPR2" %}
3968   interface(REG_INTER);
3969 %}
3970 
3971 operand regnotDPR1(regDPR reg) %{
3972   predicate( UseSSE < 2 );
3973   constraint(ALLOC_IN_RC(fp_dbl_notreg0));
3974   match(reg);
3975   format %{ %}
3976   interface(REG_INTER);
3977 %}
3978 
3979 // Float register operands
3980 operand regFPR() %{
3981   predicate( UseSSE < 2 );
3982   constraint(ALLOC_IN_RC(fp_flt_reg));
3983   match(RegF);
3984   match(regFPR1);
3985   format %{ %}
3986   interface(REG_INTER);
3987 %}
3988 
3989 // Float register operands
3990 operand regFPR1(regFPR reg) %{
3991   predicate( UseSSE < 2 );
3992   constraint(ALLOC_IN_RC(fp_flt_reg0));
3993   match(reg);
3994   format %{ "FPR1" %}
3995   interface(REG_INTER);
3996 %}
3997 
3998 // XMM Float register operands
3999 operand regF() %{
4000   predicate( UseSSE>=1 );
4001   constraint(ALLOC_IN_RC(float_reg));
4002   match(RegF);
4003   format %{ %}
4004   interface(REG_INTER);
4005 %}
4006 
4007 // XMM Double register operands
4008 operand regD() %{
4009   predicate( UseSSE>=2 );
4010   constraint(ALLOC_IN_RC(double_reg));
4011   match(RegD);
4012   format %{ %}
4013   interface(REG_INTER);
4014 %}
4015 
4016 
4017 //----------Memory Operands----------------------------------------------------
4018 // Direct Memory Operand
4019 operand direct(immP addr) %{
4020   match(addr);
4021 
4022   format %{ "[$addr]" %}
4023   interface(MEMORY_INTER) %{
4024     base(0xFFFFFFFF);
4025     index(0x4);
4026     scale(0x0);
4027     disp($addr);
4028   %}
4029 %}
4030 
4031 // Indirect Memory Operand
4032 operand indirect(eRegP reg) %{
4033   constraint(ALLOC_IN_RC(int_reg));
4034   match(reg);
4035 
4036   format %{ "[$reg]" %}
4037   interface(MEMORY_INTER) %{
4038     base($reg);
4039     index(0x4);
4040     scale(0x0);
4041     disp(0x0);
4042   %}
4043 %}
4044 
4045 // Indirect Memory Plus Short Offset Operand
4046 operand indOffset8(eRegP reg, immI8 off) %{
4047   match(AddP reg off);
4048 
4049   format %{ "[$reg + $off]" %}
4050   interface(MEMORY_INTER) %{
4051     base($reg);
4052     index(0x4);
4053     scale(0x0);
4054     disp($off);
4055   %}
4056 %}
4057 
4058 // Indirect Memory Plus Long Offset Operand
4059 operand indOffset32(eRegP reg, immI off) %{
4060   match(AddP reg off);
4061 
4062   format %{ "[$reg + $off]" %}
4063   interface(MEMORY_INTER) %{
4064     base($reg);
4065     index(0x4);
4066     scale(0x0);
4067     disp($off);
4068   %}
4069 %}
4070 
4071 // Indirect Memory Plus Long Offset Operand
4072 operand indOffset32X(rRegI reg, immP off) %{
4073   match(AddP off reg);
4074 
4075   format %{ "[$reg + $off]" %}
4076   interface(MEMORY_INTER) %{
4077     base($reg);
4078     index(0x4);
4079     scale(0x0);
4080     disp($off);
4081   %}
4082 %}
4083 
4084 // Indirect Memory Plus Index Register Plus Offset Operand
4085 operand indIndexOffset(eRegP reg, rRegI ireg, immI off) %{
4086   match(AddP (AddP reg ireg) off);
4087 
4088   op_cost(10);
4089   format %{"[$reg + $off + $ireg]" %}
4090   interface(MEMORY_INTER) %{
4091     base($reg);
4092     index($ireg);
4093     scale(0x0);
4094     disp($off);
4095   %}
4096 %}
4097 
4098 // Indirect Memory Plus Index Register Plus Offset Operand
4099 operand indIndex(eRegP reg, rRegI ireg) %{
4100   match(AddP reg ireg);
4101 
4102   op_cost(10);
4103   format %{"[$reg + $ireg]" %}
4104   interface(MEMORY_INTER) %{
4105     base($reg);
4106     index($ireg);
4107     scale(0x0);
4108     disp(0x0);
4109   %}
4110 %}
4111 
4112 // // -------------------------------------------------------------------------
4113 // // 486 architecture doesn't support "scale * index + offset" without a base
4114 // // -------------------------------------------------------------------------
4115 // // Scaled Memory Operands
4116 // // Indirect Memory Times Scale Plus Offset Operand
4117 // operand indScaleOffset(immP off, rRegI ireg, immI2 scale) %{
4118 //   match(AddP off (LShiftI ireg scale));
4119 //
4120 //   op_cost(10);
4121 //   format %{"[$off + $ireg << $scale]" %}
4122 //   interface(MEMORY_INTER) %{
4123 //     base(0x4);
4124 //     index($ireg);
4125 //     scale($scale);
4126 //     disp($off);
4127 //   %}
4128 // %}
4129 
4130 // Indirect Memory Times Scale Plus Index Register
4131 operand indIndexScale(eRegP reg, rRegI ireg, immI2 scale) %{
4132   match(AddP reg (LShiftI ireg scale));
4133 
4134   op_cost(10);
4135   format %{"[$reg + $ireg << $scale]" %}
4136   interface(MEMORY_INTER) %{
4137     base($reg);
4138     index($ireg);
4139     scale($scale);
4140     disp(0x0);
4141   %}
4142 %}
4143 
4144 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
4145 operand indIndexScaleOffset(eRegP reg, immI off, rRegI ireg, immI2 scale) %{
4146   match(AddP (AddP reg (LShiftI ireg scale)) off);
4147 
4148   op_cost(10);
4149   format %{"[$reg + $off + $ireg << $scale]" %}
4150   interface(MEMORY_INTER) %{
4151     base($reg);
4152     index($ireg);
4153     scale($scale);
4154     disp($off);
4155   %}
4156 %}
4157 
4158 //----------Load Long Memory Operands------------------------------------------
4159 // The load-long idiom will use its address expression again after loading
4160 // the first word of the long.  If the load-long destination overlaps with
4161 // registers used in the addressing expression, the 2nd half will be loaded
4162 // from a clobbered address.  Fix this by requiring that load-long use
4163 // address registers that do not overlap with the load-long target.
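//
// Illustrative sketch (not part of the operand definitions): with a hypothetical
// overlapping register choice, the first half-load clobbers the address that the
// second half-load still needs:
//
//      MOV dst.lo,[base+disp]      // dst.lo aliases base -- the address is gone
//      MOV dst.hi,[base+disp+4]    // second half now reads from a garbage address
//
// Pinning the base to ESI (the load_long_RegP operand below) gives the matcher an
// address register that is kept disjoint from the destination pair.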
4164 
4165 // load-long support
4166 operand load_long_RegP() %{
4167   constraint(ALLOC_IN_RC(esi_reg));
4168   match(RegP);
4169   match(eSIRegP);
4170   op_cost(100);
4171   format %{  %}
4172   interface(REG_INTER);
4173 %}
4174 
4175 // Indirect Memory Operand Long
4176 operand load_long_indirect(load_long_RegP reg) %{
4177   constraint(ALLOC_IN_RC(esi_reg));
4178   match(reg);
4179 
4180   format %{ "[$reg]" %}
4181   interface(MEMORY_INTER) %{
4182     base($reg);
4183     index(0x4);
4184     scale(0x0);
4185     disp(0x0);
4186   %}
4187 %}
4188 
4189 // Indirect Memory Plus Long Offset Operand
4190 operand load_long_indOffset32(load_long_RegP reg, immI off) %{
4191   match(AddP reg off);
4192 
4193   format %{ "[$reg + $off]" %}
4194   interface(MEMORY_INTER) %{
4195     base($reg);
4196     index(0x4);
4197     scale(0x0);
4198     disp($off);
4199   %}
4200 %}
4201 
4202 opclass load_long_memory(load_long_indirect, load_long_indOffset32);
4203 
4204 
4205 //----------Special Memory Operands--------------------------------------------
4206 // Stack Slot Operand - This operand is used for loading and storing temporary
4207 //                      values on the stack where a match requires a value to
4208 //                      flow through memory.
4209 operand stackSlotP(sRegP reg) %{
4210   constraint(ALLOC_IN_RC(stack_slots));
4211   // No match rule because this operand is only generated in matching
4212   format %{ "[$reg]" %}
4213   interface(MEMORY_INTER) %{
4214     base(0x4);   // ESP
4215     index(0x4);  // No Index
4216     scale(0x0);  // No Scale
4217     disp($reg);  // Stack Offset
4218   %}
4219 %}
4220 
4221 operand stackSlotI(sRegI reg) %{
4222   constraint(ALLOC_IN_RC(stack_slots));
4223   // No match rule because this operand is only generated in matching
4224   format %{ "[$reg]" %}
4225   interface(MEMORY_INTER) %{
4226     base(0x4);   // ESP
4227     index(0x4);  // No Index
4228     scale(0x0);  // No Scale
4229     disp($reg);  // Stack Offset
4230   %}
4231 %}
4232 
4233 operand stackSlotF(sRegF reg) %{
4234   constraint(ALLOC_IN_RC(stack_slots));
4235   // No match rule because this operand is only generated in matching
4236   format %{ "[$reg]" %}
4237   interface(MEMORY_INTER) %{
4238     base(0x4);   // ESP
4239     index(0x4);  // No Index
4240     scale(0x0);  // No Scale
4241     disp($reg);  // Stack Offset
4242   %}
4243 %}
4244 
4245 operand stackSlotD(sRegD reg) %{
4246   constraint(ALLOC_IN_RC(stack_slots));
4247   // No match rule because this operand is only generated in matching
4248   format %{ "[$reg]" %}
4249   interface(MEMORY_INTER) %{
4250     base(0x4);   // ESP
4251     index(0x4);  // No Index
4252     scale(0x0);  // No Scale
4253     disp($reg);  // Stack Offset
4254   %}
4255 %}
4256 
4257 operand stackSlotL(sRegL reg) %{
4258   constraint(ALLOC_IN_RC(stack_slots));
4259   // No match rule because this operand is only generated in matching
4260   format %{ "[$reg]" %}
4261   interface(MEMORY_INTER) %{
4262     base(0x4);   // ESP
4263     index(0x4);  // No Index
4264     scale(0x0);  // No Scale
4265     disp($reg);  // Stack Offset
4266   %}
4267 %}
4268 
4269 //----------Memory Operands - Win95 Implicit Null Variants----------------
4270 // Indirect Memory Operand
4271 operand indirect_win95_safe(eRegP_no_EBP reg)
4272 %{
4273   constraint(ALLOC_IN_RC(int_reg));
4274   match(reg);
4275 
4276   op_cost(100);
4277   format %{ "[$reg]" %}
4278   interface(MEMORY_INTER) %{
4279     base($reg);
4280     index(0x4);
4281     scale(0x0);
4282     disp(0x0);
4283   %}
4284 %}
4285 
4286 // Indirect Memory Plus Short Offset Operand
4287 operand indOffset8_win95_safe(eRegP_no_EBP reg, immI8 off)
4288 %{
4289   match(AddP reg off);
4290 
4291   op_cost(100);
4292   format %{ "[$reg + $off]" %}
4293   interface(MEMORY_INTER) %{
4294     base($reg);
4295     index(0x4);
4296     scale(0x0);
4297     disp($off);
4298   %}
4299 %}
4300 
4301 // Indirect Memory Plus Long Offset Operand
4302 operand indOffset32_win95_safe(eRegP_no_EBP reg, immI off)
4303 %{
4304   match(AddP reg off);
4305 
4306   op_cost(100);
4307   format %{ "[$reg + $off]" %}
4308   interface(MEMORY_INTER) %{
4309     base($reg);
4310     index(0x4);
4311     scale(0x0);
4312     disp($off);
4313   %}
4314 %}
4315 
4316 // Indirect Memory Plus Index Register Plus Offset Operand
4317 operand indIndexOffset_win95_safe(eRegP_no_EBP reg, rRegI ireg, immI off)
4318 %{
4319   match(AddP (AddP reg ireg) off);
4320 
4321   op_cost(100);
4322   format %{"[$reg + $off + $ireg]" %}
4323   interface(MEMORY_INTER) %{
4324     base($reg);
4325     index($ireg);
4326     scale(0x0);
4327     disp($off);
4328   %}
4329 %}
4330 
4331 // Indirect Memory Times Scale Plus Index Register
4332 operand indIndexScale_win95_safe(eRegP_no_EBP reg, rRegI ireg, immI2 scale)
4333 %{
4334   match(AddP reg (LShiftI ireg scale));
4335 
4336   op_cost(100);
4337   format %{"[$reg + $ireg << $scale]" %}
4338   interface(MEMORY_INTER) %{
4339     base($reg);
4340     index($ireg);
4341     scale($scale);
4342     disp(0x0);
4343   %}
4344 %}
4345 
4346 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
4347 operand indIndexScaleOffset_win95_safe(eRegP_no_EBP reg, immI off, rRegI ireg, immI2 scale)
4348 %{
4349   match(AddP (AddP reg (LShiftI ireg scale)) off);
4350 
4351   op_cost(100);
4352   format %{"[$reg + $off + $ireg << $scale]" %}
4353   interface(MEMORY_INTER) %{
4354     base($reg);
4355     index($ireg);
4356     scale($scale);
4357     disp($off);
4358   %}
4359 %}
4360 
4361 //----------Conditional Branch Operands----------------------------------------
4362 // Comparison Op  - This is the operation of the comparison, and is limited to
4363 //                  the following set of codes:
4364 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
4365 //
4366 // Other attributes of the comparison, such as unsignedness, are specified
4367 // by the comparison instruction that sets a condition code flags register.
4368 // That result is represented by a flags operand whose subtype is appropriate
4369 // to the unsignedness (etc.) of the comparison.
4370 //
4371 // Later, the instruction which matches both the Comparison Op (a Bool) and
4372 // the flags (produced by the Cmp) specifies the coding of the comparison op
4373 // by matching a specific subtype of Bool operand below, such as cmpOpU.
4374 
4375 // Comparison Code
4376 operand cmpOp() %{
4377   match(Bool);
4378 
4379   format %{ "" %}
4380   interface(COND_INTER) %{
4381     equal(0x4, "e");
4382     not_equal(0x5, "ne");
4383     less(0xC, "l");
4384     greater_equal(0xD, "ge");
4385     less_equal(0xE, "le");
4386     greater(0xF, "g");
4387     overflow(0x0, "o");
4388     no_overflow(0x1, "no");
4389   %}
4390 %}
4391 
4392 // Comparison Code, unsigned compare.  Used by FP also, with
4393 // C2 (unordered) turned into GT or LT already.  The other bits
4394 // C0 and C3 are turned into Carry & Zero flags.
4395 operand cmpOpU() %{
4396   match(Bool);
4397 
4398   format %{ "" %}
4399   interface(COND_INTER) %{
4400     equal(0x4, "e");
4401     not_equal(0x5, "ne");
4402     less(0x2, "b");
4403     greater_equal(0x3, "nb");
4404     less_equal(0x6, "be");
4405     greater(0x7, "nbe");
4406     overflow(0x0, "o");
4407     no_overflow(0x1, "no");
4408   %}
4409 %}
4410 
4411 // Floating comparisons that don't require any fixup for the unordered case
4412 operand cmpOpUCF() %{
4413   match(Bool);
4414   predicate(n->as_Bool()->_test._test == BoolTest::lt ||
4415             n->as_Bool()->_test._test == BoolTest::ge ||
4416             n->as_Bool()->_test._test == BoolTest::le ||
4417             n->as_Bool()->_test._test == BoolTest::gt);
4418   format %{ "" %}
4419   interface(COND_INTER) %{
4420     equal(0x4, "e");
4421     not_equal(0x5, "ne");
4422     less(0x2, "b");
4423     greater_equal(0x3, "nb");
4424     less_equal(0x6, "be");
4425     greater(0x7, "nbe");
4426     overflow(0x0, "o");
4427     no_overflow(0x1, "no");
4428   %}
4429 %}
4430 
4431 
4432 // Floating comparisons that can be fixed up with extra conditional jumps
4433 operand cmpOpUCF2() %{
4434   match(Bool);
4435   predicate(n->as_Bool()->_test._test == BoolTest::ne ||
4436             n->as_Bool()->_test._test == BoolTest::eq);
4437   format %{ "" %}
4438   interface(COND_INTER) %{
4439     equal(0x4, "e");
4440     not_equal(0x5, "ne");
4441     less(0x2, "b");
4442     greater_equal(0x3, "nb");
4443     less_equal(0x6, "be");
4444     greater(0x7, "nbe");
4445     overflow(0x0, "o");
4446     no_overflow(0x1, "no");
4447   %}
4448 %}
4449 
4450 // Comparison Code for FP conditional move
4451 operand cmpOp_fcmov() %{
4452   match(Bool);
4453 
4454   predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4455             n->as_Bool()->_test._test != BoolTest::no_overflow);
4456   format %{ "" %}
4457   interface(COND_INTER) %{
4458     equal        (0x0C8);
4459     not_equal    (0x1C8);
4460     less         (0x0C0);
4461     greater_equal(0x1C0);
4462     less_equal   (0x0D0);
4463     greater      (0x1D0);
4464     overflow(0x0, "o"); // not really supported by the instruction
4465     no_overflow(0x1, "no"); // not really supported by the instruction
4466   %}
4467 %}
4468 
4469 // Comparison Code used in long compares
4470 operand cmpOp_commute() %{
4471   match(Bool);
4472 
4473   format %{ "" %}
4474   interface(COND_INTER) %{
4475     equal(0x4, "e");
4476     not_equal(0x5, "ne");
4477     less(0xF, "g");
4478     greater_equal(0xE, "le");
4479     less_equal(0xD, "ge");
4480     greater(0xC, "l");
4481     overflow(0x0, "o");
4482     no_overflow(0x1, "no");
4483   %}
4484 %}
4485 
4486 //----------OPERAND CLASSES----------------------------------------------------
4487 // Operand Classes are groups of operands that are used to simplify
4488 // instruction definitions by not requiring the AD writer to specify separate
4489 // instructions for every form of operand when the instruction accepts
4490 // multiple operand types with the same basic encoding and format.  The classic
4491 // case of this is memory operands.
4492 
4493 opclass memory(direct, indirect, indOffset8, indOffset32, indOffset32X, indIndexOffset,
4494                indIndex, indIndexScale, indIndexScaleOffset);
4495 
4496 // Long memory operations are encoded in 2 instructions and a +4 offset.
4497 // This means some kind of offset is always required and you cannot use
4498 // an oop as the offset (as is done when working with static globals).
4499 opclass long_memory(direct, indirect, indOffset8, indOffset32, indIndexOffset,
4500                     indIndex, indIndexScale, indIndexScaleOffset);
4501 
4502 
4503 //----------PIPELINE-----------------------------------------------------------
4504 // Rules which define the behavior of the target architecture's pipeline.
4505 pipeline %{
4506 
4507 //----------ATTRIBUTES---------------------------------------------------------
4508 attributes %{
4509   variable_size_instructions;        // Variable-sized instructions
4510   max_instructions_per_bundle = 3;   // Up to 3 instructions per bundle
4511   instruction_unit_size = 1;         // An instruction is 1 byte long
4512   instruction_fetch_unit_size = 16;  // The processor fetches one line
4513   instruction_fetch_units = 1;       // of 16 bytes
4514 
4515   // List of nop instructions
4516   nops( MachNop );
4517 %}
4518 
4519 //----------RESOURCES----------------------------------------------------------
4520 // Resources are the functional units available to the machine
4521 
4522 // Generic P2/P3 pipeline
4523 // 3 decoders, only D0 handles big operands; a "bundle" is the limit of
4524 // 3 instructions decoded per cycle.
4525 // 2 load/store ops per cycle, 1 branch, 1 FPU,
4526 // 2 ALU op, only ALU0 handles mul/div instructions.
4527 resources( D0, D1, D2, DECODE = D0 | D1 | D2,
4528            MS0, MS1, MEM = MS0 | MS1,
4529            BR, FPU,
4530            ALU0, ALU1, ALU = ALU0 | ALU1 );
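
// A pipe_class claims these resources by mask: DECODE means "any one of
// D0/D1/D2", MEM means "either MS0 or MS1", and so on.  For example, the
// ialu_reg class below uses
//
//   DECODE : S0;   // any decoder at stage S0
//   ALU    : S3;   // any alu at stage S3
//
// while a class that needs the big decoder names D0 directly.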
4531 
4532 //----------PIPELINE DESCRIPTION-----------------------------------------------
4533 // Pipeline Description specifies the stages in the machine's pipeline
4534 
4535 // Generic P2/P3 pipeline
4536 pipe_desc(S0, S1, S2, S3, S4, S5);
4537 
4538 //----------PIPELINE CLASSES---------------------------------------------------
4539 // Pipeline Classes describe the stages in which input and output are
4540 // referenced by the hardware pipeline.
4541 
4542 // Naming convention: ialu or fpu
4543 // Then: _reg
4544 // Then: _reg if there is a 2nd register
4545 // Then: _long if it's a pair of instructions implementing a long
4546 // Then: _fat if it requires the big decoder
4547 //   Or: _mem if it requires the big decoder and a memory unit.
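//
// For example, "ialu_reg_long_fat" (below) reads as: integer ALU operation,
// register destination, a 2-instruction long pair, requiring the big decoder
// (it claims D0 : S0(2) rather than DECODE : S0(2)).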
4548 
4549 // Integer ALU reg operation
4550 pipe_class ialu_reg(rRegI dst) %{
4551     single_instruction;
4552     dst    : S4(write);
4553     dst    : S3(read);
4554     DECODE : S0;        // any decoder
4555     ALU    : S3;        // any alu
4556 %}
4557 
4558 // Long ALU reg operation
4559 pipe_class ialu_reg_long(eRegL dst) %{
4560     instruction_count(2);
4561     dst    : S4(write);
4562     dst    : S3(read);
4563     DECODE : S0(2);     // any 2 decoders
4564     ALU    : S3(2);     // both alus
4565 %}
4566 
4567 // Integer ALU reg operation using big decoder
4568 pipe_class ialu_reg_fat(rRegI dst) %{
4569     single_instruction;
4570     dst    : S4(write);
4571     dst    : S3(read);
4572     D0     : S0;        // big decoder only
4573     ALU    : S3;        // any alu
4574 %}
4575 
4576 // Long ALU reg operation using big decoder
4577 pipe_class ialu_reg_long_fat(eRegL dst) %{
4578     instruction_count(2);
4579     dst    : S4(write);
4580     dst    : S3(read);
4581     D0     : S0(2);     // big decoder only; twice
4582     ALU    : S3(2);     // any 2 alus
4583 %}
4584 
4585 // Integer ALU reg-reg operation
4586 pipe_class ialu_reg_reg(rRegI dst, rRegI src) %{
4587     single_instruction;
4588     dst    : S4(write);
4589     src    : S3(read);
4590     DECODE : S0;        // any decoder
4591     ALU    : S3;        // any alu
4592 %}
4593 
4594 // Long ALU reg-reg operation
4595 pipe_class ialu_reg_reg_long(eRegL dst, eRegL src) %{
4596     instruction_count(2);
4597     dst    : S4(write);
4598     src    : S3(read);
4599     DECODE : S0(2);     // any 2 decoders
4600     ALU    : S3(2);     // both alus
4601 %}
4602 
// Integer ALU reg-reg operation using big decoder
4604 pipe_class ialu_reg_reg_fat(rRegI dst, memory src) %{
4605     single_instruction;
4606     dst    : S4(write);
4607     src    : S3(read);
4608     D0     : S0;        // big decoder only
4609     ALU    : S3;        // any alu
4610 %}
4611 
// Long ALU reg-reg operation using big decoder
4613 pipe_class ialu_reg_reg_long_fat(eRegL dst, eRegL src) %{
4614     instruction_count(2);
4615     dst    : S4(write);
4616     src    : S3(read);
4617     D0     : S0(2);     // big decoder only; twice
4618     ALU    : S3(2);     // both alus
4619 %}
4620 
4621 // Integer ALU reg-mem operation
4622 pipe_class ialu_reg_mem(rRegI dst, memory mem) %{
4623     single_instruction;
4624     dst    : S5(write);
4625     mem    : S3(read);
4626     D0     : S0;        // big decoder only
4627     ALU    : S4;        // any alu
4628     MEM    : S3;        // any mem
4629 %}
4630 
4631 // Long ALU reg-mem operation
4632 pipe_class ialu_reg_long_mem(eRegL dst, load_long_memory mem) %{
4633     instruction_count(2);
4634     dst    : S5(write);
4635     mem    : S3(read);
4636     D0     : S0(2);     // big decoder only; twice
4637     ALU    : S4(2);     // any 2 alus
4638     MEM    : S3(2);     // both mems
4639 %}
4640 
4641 // Integer mem operation (prefetch)
4642 pipe_class ialu_mem(memory mem)
4643 %{
4644     single_instruction;
4645     mem    : S3(read);
4646     D0     : S0;        // big decoder only
4647     MEM    : S3;        // any mem
4648 %}
4649 
4650 // Integer Store to Memory
4651 pipe_class ialu_mem_reg(memory mem, rRegI src) %{
4652     single_instruction;
4653     mem    : S3(read);
4654     src    : S5(read);
4655     D0     : S0;        // big decoder only
4656     ALU    : S4;        // any alu
4657     MEM    : S3;
4658 %}
4659 
4660 // Long Store to Memory
4661 pipe_class ialu_mem_long_reg(memory mem, eRegL src) %{
4662     instruction_count(2);
4663     mem    : S3(read);
4664     src    : S5(read);
4665     D0     : S0(2);     // big decoder only; twice
4666     ALU    : S4(2);     // any 2 alus
4667     MEM    : S3(2);     // Both mems
4668 %}
4669 
4670 // Integer Store to Memory
4671 pipe_class ialu_mem_imm(memory mem) %{
4672     single_instruction;
4673     mem    : S3(read);
4674     D0     : S0;        // big decoder only
4675     ALU    : S4;        // any alu
4676     MEM    : S3;
4677 %}
4678 
4679 // Integer ALU0 reg-reg operation
4680 pipe_class ialu_reg_reg_alu0(rRegI dst, rRegI src) %{
4681     single_instruction;
4682     dst    : S4(write);
4683     src    : S3(read);
4684     D0     : S0;        // Big decoder only
4685     ALU0   : S3;        // only alu0
4686 %}
4687 
4688 // Integer ALU0 reg-mem operation
4689 pipe_class ialu_reg_mem_alu0(rRegI dst, memory mem) %{
4690     single_instruction;
4691     dst    : S5(write);
4692     mem    : S3(read);
4693     D0     : S0;        // big decoder only
4694     ALU0   : S4;        // ALU0 only
4695     MEM    : S3;        // any mem
4696 %}
4697 
4698 // Integer ALU reg-reg operation
4699 pipe_class ialu_cr_reg_reg(eFlagsReg cr, rRegI src1, rRegI src2) %{
4700     single_instruction;
4701     cr     : S4(write);
4702     src1   : S3(read);
4703     src2   : S3(read);
4704     DECODE : S0;        // any decoder
4705     ALU    : S3;        // any alu
4706 %}
4707 
4708 // Integer ALU reg-imm operation
4709 pipe_class ialu_cr_reg_imm(eFlagsReg cr, rRegI src1) %{
4710     single_instruction;
4711     cr     : S4(write);
4712     src1   : S3(read);
4713     DECODE : S0;        // any decoder
4714     ALU    : S3;        // any alu
4715 %}
4716 
4717 // Integer ALU reg-mem operation
4718 pipe_class ialu_cr_reg_mem(eFlagsReg cr, rRegI src1, memory src2) %{
4719     single_instruction;
4720     cr     : S4(write);
4721     src1   : S3(read);
4722     src2   : S3(read);
4723     D0     : S0;        // big decoder only
4724     ALU    : S4;        // any alu
4725     MEM    : S3;
4726 %}
4727 
4728 // Conditional move reg-reg
4729 pipe_class pipe_cmplt( rRegI p, rRegI q, rRegI y ) %{
4730     instruction_count(4);
4731     y      : S4(read);
4732     q      : S3(read);
4733     p      : S3(read);
4734     DECODE : S0(4);     // any decoder
4735 %}
4736 
4737 // Conditional move reg-reg
4738 pipe_class pipe_cmov_reg( rRegI dst, rRegI src, eFlagsReg cr ) %{
4739     single_instruction;
4740     dst    : S4(write);
4741     src    : S3(read);
4742     cr     : S3(read);
4743     DECODE : S0;        // any decoder
4744 %}
4745 
4746 // Conditional move reg-mem
4747 pipe_class pipe_cmov_mem( eFlagsReg cr, rRegI dst, memory src) %{
4748     single_instruction;
4749     dst    : S4(write);
4750     src    : S3(read);
4751     cr     : S3(read);
4752     DECODE : S0;        // any decoder
4753     MEM    : S3;
4754 %}
4755 
4756 // Conditional move reg-reg long
4757 pipe_class pipe_cmov_reg_long( eFlagsReg cr, eRegL dst, eRegL src) %{
4758     single_instruction;
4759     dst    : S4(write);
4760     src    : S3(read);
4761     cr     : S3(read);
4762     DECODE : S0(2);     // any 2 decoders
4763 %}
4764 
4765 // Conditional move double reg-reg
4766 pipe_class pipe_cmovDPR_reg( eFlagsReg cr, regDPR1 dst, regDPR src) %{
4767     single_instruction;
4768     dst    : S4(write);
4769     src    : S3(read);
4770     cr     : S3(read);
4771     DECODE : S0;        // any decoder
4772 %}
4773 
4774 // Float reg-reg operation
4775 pipe_class fpu_reg(regDPR dst) %{
4776     instruction_count(2);
4777     dst    : S3(read);
4778     DECODE : S0(2);     // any 2 decoders
4779     FPU    : S3;
4780 %}
4781 
4782 // Float reg-reg operation
4783 pipe_class fpu_reg_reg(regDPR dst, regDPR src) %{
4784     instruction_count(2);
4785     dst    : S4(write);
4786     src    : S3(read);
4787     DECODE : S0(2);     // any 2 decoders
4788     FPU    : S3;
4789 %}
4790 
4791 // Float reg-reg operation
4792 pipe_class fpu_reg_reg_reg(regDPR dst, regDPR src1, regDPR src2) %{
4793     instruction_count(3);
4794     dst    : S4(write);
4795     src1   : S3(read);
4796     src2   : S3(read);
4797     DECODE : S0(3);     // any 3 decoders
4798     FPU    : S3(2);
4799 %}
4800 
4801 // Float reg-reg operation
4802 pipe_class fpu_reg_reg_reg_reg(regDPR dst, regDPR src1, regDPR src2, regDPR src3) %{
4803     instruction_count(4);
4804     dst    : S4(write);
4805     src1   : S3(read);
4806     src2   : S3(read);
4807     src3   : S3(read);
    DECODE : S0(4);     // any decoders; 4 decode slots
4809     FPU    : S3(2);
4810 %}
4811 
4812 // Float reg-reg operation
4813 pipe_class fpu_reg_mem_reg_reg(regDPR dst, memory src1, regDPR src2, regDPR src3) %{
4814     instruction_count(4);
4815     dst    : S4(write);
4816     src1   : S3(read);
4817     src2   : S3(read);
4818     src3   : S3(read);
4819     DECODE : S1(3);     // any 3 decoders
4820     D0     : S0;        // Big decoder only
4821     FPU    : S3(2);
4822     MEM    : S3;
4823 %}
4824 
4825 // Float reg-mem operation
4826 pipe_class fpu_reg_mem(regDPR dst, memory mem) %{
4827     instruction_count(2);
4828     dst    : S5(write);
4829     mem    : S3(read);
4830     D0     : S0;        // big decoder only
4831     DECODE : S1;        // any decoder for FPU POP
4832     FPU    : S4;
4833     MEM    : S3;        // any mem
4834 %}
4835 
4836 // Float reg-mem operation
4837 pipe_class fpu_reg_reg_mem(regDPR dst, regDPR src1, memory mem) %{
4838     instruction_count(3);
4839     dst    : S5(write);
4840     src1   : S3(read);
4841     mem    : S3(read);
4842     D0     : S0;        // big decoder only
4843     DECODE : S1(2);     // any decoder for FPU POP
4844     FPU    : S4;
4845     MEM    : S3;        // any mem
4846 %}
4847 
4848 // Float mem-reg operation
4849 pipe_class fpu_mem_reg(memory mem, regDPR src) %{
4850     instruction_count(2);
4851     src    : S5(read);
4852     mem    : S3(read);
4853     DECODE : S0;        // any decoder for FPU PUSH
4854     D0     : S1;        // big decoder only
4855     FPU    : S4;
4856     MEM    : S3;        // any mem
4857 %}
4858 
4859 pipe_class fpu_mem_reg_reg(memory mem, regDPR src1, regDPR src2) %{
4860     instruction_count(3);
4861     src1   : S3(read);
4862     src2   : S3(read);
4863     mem    : S3(read);
4864     DECODE : S0(2);     // any decoder for FPU PUSH
4865     D0     : S1;        // big decoder only
4866     FPU    : S4;
4867     MEM    : S3;        // any mem
4868 %}
4869 
4870 pipe_class fpu_mem_reg_mem(memory mem, regDPR src1, memory src2) %{
4871     instruction_count(3);
4872     src1   : S3(read);
4873     src2   : S3(read);
4874     mem    : S4(read);
4875     DECODE : S0;        // any decoder for FPU PUSH
4876     D0     : S0(2);     // big decoder only
4877     FPU    : S4;
4878     MEM    : S3(2);     // any mem
4879 %}
4880 
4881 pipe_class fpu_mem_mem(memory dst, memory src1) %{
4882     instruction_count(2);
4883     src1   : S3(read);
4884     dst    : S4(read);
4885     D0     : S0(2);     // big decoder only
4886     MEM    : S3(2);     // any mem
4887 %}
4888 
4889 pipe_class fpu_mem_mem_mem(memory dst, memory src1, memory src2) %{
4890     instruction_count(3);
4891     src1   : S3(read);
4892     src2   : S3(read);
4893     dst    : S4(read);
4894     D0     : S0(3);     // big decoder only
4895     FPU    : S4;
4896     MEM    : S3(3);     // any mem
4897 %}
4898 
4899 pipe_class fpu_mem_reg_con(memory mem, regDPR src1) %{
4900     instruction_count(3);
4901     src1   : S4(read);
4902     mem    : S4(read);
4903     DECODE : S0;        // any decoder for FPU PUSH
4904     D0     : S0(2);     // big decoder only
4905     FPU    : S4;
4906     MEM    : S3(2);     // any mem
4907 %}
4908 
4909 // Float load constant
4910 pipe_class fpu_reg_con(regDPR dst) %{
4911     instruction_count(2);
4912     dst    : S5(write);
4913     D0     : S0;        // big decoder only for the load
4914     DECODE : S1;        // any decoder for FPU POP
4915     FPU    : S4;
4916     MEM    : S3;        // any mem
4917 %}
4918 
4919 // Float load constant
4920 pipe_class fpu_reg_reg_con(regDPR dst, regDPR src) %{
4921     instruction_count(3);
4922     dst    : S5(write);
4923     src    : S3(read);
4924     D0     : S0;        // big decoder only for the load
4925     DECODE : S1(2);     // any decoder for FPU POP
4926     FPU    : S4;
4927     MEM    : S3;        // any mem
4928 %}
4929 
// Unconditional branch
4931 pipe_class pipe_jmp( label labl ) %{
4932     single_instruction;
4933     BR   : S3;
4934 %}
4935 
4936 // Conditional branch
4937 pipe_class pipe_jcc( cmpOp cmp, eFlagsReg cr, label labl ) %{
4938     single_instruction;
4939     cr    : S1(read);
4940     BR    : S3;
4941 %}
4942 
4943 // Allocation idiom
4944 pipe_class pipe_cmpxchg( eRegP dst, eRegP heap_ptr ) %{
4945     instruction_count(1); force_serialization;
4946     fixed_latency(6);
4947     heap_ptr : S3(read);
4948     DECODE   : S0(3);
4949     D0       : S2;
4950     MEM      : S3;
4951     ALU      : S3(2);
4952     dst      : S5(write);
4953     BR       : S5;
4954 %}
4955 
4956 // Generic big/slow expanded idiom
4957 pipe_class pipe_slow(  ) %{
4958     instruction_count(10); multiple_bundles; force_serialization;
4959     fixed_latency(100);
4960     D0  : S0(2);
4961     MEM : S3(2);
4962 %}
4963 
4964 // The real do-nothing guy
4965 pipe_class empty( ) %{
4966     instruction_count(0);
4967 %}
4968 
4969 // Define the class for the Nop node
4970 define %{
4971    MachNop = empty;
4972 %}
4973 
4974 %}
4975 
4976 //----------INSTRUCTIONS-------------------------------------------------------
4977 //
4978 // match      -- States which machine-independent subtree may be replaced
4979 //               by this instruction.
4980 // ins_cost   -- The estimated cost of this instruction is used by instruction
4981 //               selection to identify a minimum cost tree of machine
4982 //               instructions that matches a tree of machine-independent
4983 //               instructions.
4984 // format     -- A string providing the disassembly for this instruction.
4985 //               The value of an instruction's operand may be inserted
4986 //               by referring to it with a '$' prefix.
4987 // opcode     -- Three instruction opcodes may be provided.  These are referred
4988 //               to within an encode class as $primary, $secondary, and $tertiary
4989 //               respectively.  The primary opcode is commonly used to
4990 //               indicate the type of machine instruction, while secondary
4991 //               and tertiary are often used for prefix options or addressing
4992 //               modes.
4993 // ins_encode -- A list of encode classes with parameters. The encode class
4994 //               name must have been defined in an 'enc_class' specification
4995 //               in the encode section of the architecture description.
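//
// A minimal rule tying these pieces together (this mirrors the real storeB
// rule defined in the store section below):
//
//   instruct storeB(memory mem, xRegI src) %{
//     match(Set mem (StoreB mem src));        // ideal subtree this replaces
//     ins_cost(125);                          // cost used during matching
//     format %{ "MOV8   $mem,$src" %}         // disassembly string
//     opcode(0x88);                           // becomes $primary
//     ins_encode( OpcP, RegMem( src, mem ) ); // emit $primary, then ModRM
//     ins_pipe( ialu_mem_reg );
//   %}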
4996 
4997 //----------BSWAP-Instruction--------------------------------------------------
4998 instruct bytes_reverse_int(rRegI dst) %{
4999   match(Set dst (ReverseBytesI dst));
5000 
5001   format %{ "BSWAP  $dst" %}
5002   opcode(0x0F, 0xC8);
5003   ins_encode( OpcP, OpcSReg(dst) );
5004   ins_pipe( ialu_reg );
5005 %}
5006 
5007 instruct bytes_reverse_long(eRegL dst) %{
5008   match(Set dst (ReverseBytesL dst));
5009 
5010   format %{ "BSWAP  $dst.lo\n\t"
5011             "BSWAP  $dst.hi\n\t"
5012             "XCHG   $dst.lo $dst.hi" %}
5013 
5014   ins_cost(125);
5015   ins_encode( bswap_long_bytes(dst) );
5016   ins_pipe( ialu_reg_reg);
5017 %}
5018 
5019 instruct bytes_reverse_unsigned_short(rRegI dst, eFlagsReg cr) %{
5020   match(Set dst (ReverseBytesUS dst));
5021   effect(KILL cr);
5022 
5023   format %{ "BSWAP  $dst\n\t" 
5024             "SHR    $dst,16\n\t" %}
5025   ins_encode %{
5026     __ bswapl($dst$$Register);
5027     __ shrl($dst$$Register, 16); 
5028   %}
5029   ins_pipe( ialu_reg );
5030 %}
5031 
5032 instruct bytes_reverse_short(rRegI dst, eFlagsReg cr) %{
5033   match(Set dst (ReverseBytesS dst));
5034   effect(KILL cr);
5035 
5036   format %{ "BSWAP  $dst\n\t" 
5037             "SAR    $dst,16\n\t" %}
5038   ins_encode %{
5039     __ bswapl($dst$$Register);
5040     __ sarl($dst$$Register, 16); 
5041   %}
5042   ins_pipe( ialu_reg );
5043 %}
5044 
5045 
5046 //---------- Zeros Count Instructions ------------------------------------------
5047 
5048 instruct countLeadingZerosI(rRegI dst, rRegI src, eFlagsReg cr) %{
5049   predicate(UseCountLeadingZerosInstruction);
5050   match(Set dst (CountLeadingZerosI src));
5051   effect(KILL cr);
5052 
5053   format %{ "LZCNT  $dst, $src\t# count leading zeros (int)" %}
5054   ins_encode %{
5055     __ lzcntl($dst$$Register, $src$$Register);
5056   %}
5057   ins_pipe(ialu_reg);
5058 %}
5059 
5060 instruct countLeadingZerosI_bsr(rRegI dst, rRegI src, eFlagsReg cr) %{
5061   predicate(!UseCountLeadingZerosInstruction);
5062   match(Set dst (CountLeadingZerosI src));
5063   effect(KILL cr);
5064 
5065   format %{ "BSR    $dst, $src\t# count leading zeros (int)\n\t"
5066             "JNZ    skip\n\t"
5067             "MOV    $dst, -1\n"
5068       "skip:\n\t"
5069             "NEG    $dst\n\t"
5070             "ADD    $dst, 31" %}
5071   ins_encode %{
5072     Register Rdst = $dst$$Register;
5073     Register Rsrc = $src$$Register;
5074     Label skip;
5075     __ bsrl(Rdst, Rsrc);
5076     __ jccb(Assembler::notZero, skip);
5077     __ movl(Rdst, -1);
5078     __ bind(skip);
5079     __ negl(Rdst);
5080     __ addl(Rdst, BitsPerInt - 1);
5081   %}
5082   ins_pipe(ialu_reg);
5083 %}
5084 
5085 instruct countLeadingZerosL(rRegI dst, eRegL src, eFlagsReg cr) %{
5086   predicate(UseCountLeadingZerosInstruction);
5087   match(Set dst (CountLeadingZerosL src));
5088   effect(TEMP dst, KILL cr);
5089 
5090   format %{ "LZCNT  $dst, $src.hi\t# count leading zeros (long)\n\t"
5091             "JNC    done\n\t"
5092             "LZCNT  $dst, $src.lo\n\t"
5093             "ADD    $dst, 32\n"
5094       "done:" %}
5095   ins_encode %{
5096     Register Rdst = $dst$$Register;
5097     Register Rsrc = $src$$Register;
5098     Label done;
5099     __ lzcntl(Rdst, HIGH_FROM_LOW(Rsrc));
5100     __ jccb(Assembler::carryClear, done);
5101     __ lzcntl(Rdst, Rsrc);
5102     __ addl(Rdst, BitsPerInt);
5103     __ bind(done);
5104   %}
5105   ins_pipe(ialu_reg);
5106 %}
5107 
5108 instruct countLeadingZerosL_bsr(rRegI dst, eRegL src, eFlagsReg cr) %{
5109   predicate(!UseCountLeadingZerosInstruction);
5110   match(Set dst (CountLeadingZerosL src));
5111   effect(TEMP dst, KILL cr);
5112 
5113   format %{ "BSR    $dst, $src.hi\t# count leading zeros (long)\n\t"
5114             "JZ     msw_is_zero\n\t"
5115             "ADD    $dst, 32\n\t"
5116             "JMP    not_zero\n"
5117       "msw_is_zero:\n\t"
5118             "BSR    $dst, $src.lo\n\t"
5119             "JNZ    not_zero\n\t"
5120             "MOV    $dst, -1\n"
5121       "not_zero:\n\t"
5122             "NEG    $dst\n\t"
5123             "ADD    $dst, 63\n" %}
  ins_encode %{
5125     Register Rdst = $dst$$Register;
5126     Register Rsrc = $src$$Register;
5127     Label msw_is_zero;
5128     Label not_zero;
5129     __ bsrl(Rdst, HIGH_FROM_LOW(Rsrc));
5130     __ jccb(Assembler::zero, msw_is_zero);
5131     __ addl(Rdst, BitsPerInt);
5132     __ jmpb(not_zero);
5133     __ bind(msw_is_zero);
5134     __ bsrl(Rdst, Rsrc);
5135     __ jccb(Assembler::notZero, not_zero);
5136     __ movl(Rdst, -1);
5137     __ bind(not_zero);
5138     __ negl(Rdst);
5139     __ addl(Rdst, BitsPerLong - 1);
5140   %}
5141   ins_pipe(ialu_reg);
5142 %}
5143 
5144 instruct countTrailingZerosI(rRegI dst, rRegI src, eFlagsReg cr) %{
5145   predicate(UseCountTrailingZerosInstruction);
5146   match(Set dst (CountTrailingZerosI src));
5147   effect(KILL cr);
5148 
5149   format %{ "TZCNT    $dst, $src\t# count trailing zeros (int)" %}
5150   ins_encode %{
5151     __ tzcntl($dst$$Register, $src$$Register);
5152   %}
5153   ins_pipe(ialu_reg);
5154 %}
5155 
5156 instruct countTrailingZerosI_bsf(rRegI dst, rRegI src, eFlagsReg cr) %{
5157   predicate(!UseCountTrailingZerosInstruction);
5158   match(Set dst (CountTrailingZerosI src));
5159   effect(KILL cr);
5160 
5161   format %{ "BSF    $dst, $src\t# count trailing zeros (int)\n\t"
5162             "JNZ    done\n\t"
5163             "MOV    $dst, 32\n"
5164       "done:" %}
5165   ins_encode %{
5166     Register Rdst = $dst$$Register;
5167     Label done;
5168     __ bsfl(Rdst, $src$$Register);
5169     __ jccb(Assembler::notZero, done);
5170     __ movl(Rdst, BitsPerInt);
5171     __ bind(done);
5172   %}
5173   ins_pipe(ialu_reg);
5174 %}
5175 
5176 instruct countTrailingZerosL(rRegI dst, eRegL src, eFlagsReg cr) %{
5177   predicate(UseCountTrailingZerosInstruction);
5178   match(Set dst (CountTrailingZerosL src));
5179   effect(TEMP dst, KILL cr);
5180 
5181   format %{ "TZCNT  $dst, $src.lo\t# count trailing zeros (long) \n\t"
5182             "JNC    done\n\t"
5183             "TZCNT  $dst, $src.hi\n\t"
5184             "ADD    $dst, 32\n"
5185             "done:" %}
5186   ins_encode %{
5187     Register Rdst = $dst$$Register;
5188     Register Rsrc = $src$$Register;
5189     Label done;
5190     __ tzcntl(Rdst, Rsrc);
5191     __ jccb(Assembler::carryClear, done);
5192     __ tzcntl(Rdst, HIGH_FROM_LOW(Rsrc));
5193     __ addl(Rdst, BitsPerInt);
5194     __ bind(done);
5195   %}
5196   ins_pipe(ialu_reg);
5197 %}
5198 
5199 instruct countTrailingZerosL_bsf(rRegI dst, eRegL src, eFlagsReg cr) %{
5200   predicate(!UseCountTrailingZerosInstruction);
5201   match(Set dst (CountTrailingZerosL src));
5202   effect(TEMP dst, KILL cr);
5203 
5204   format %{ "BSF    $dst, $src.lo\t# count trailing zeros (long)\n\t"
5205             "JNZ    done\n\t"
5206             "BSF    $dst, $src.hi\n\t"
5207             "JNZ    msw_not_zero\n\t"
5208             "MOV    $dst, 32\n"
5209       "msw_not_zero:\n\t"
5210             "ADD    $dst, 32\n"
5211       "done:" %}
5212   ins_encode %{
5213     Register Rdst = $dst$$Register;
5214     Register Rsrc = $src$$Register;
5215     Label msw_not_zero;
5216     Label done;
5217     __ bsfl(Rdst, Rsrc);
5218     __ jccb(Assembler::notZero, done);
5219     __ bsfl(Rdst, HIGH_FROM_LOW(Rsrc));
5220     __ jccb(Assembler::notZero, msw_not_zero);
5221     __ movl(Rdst, BitsPerInt);
5222     __ bind(msw_not_zero);
5223     __ addl(Rdst, BitsPerInt);
5224     __ bind(done);
5225   %}
5226   ins_pipe(ialu_reg);
5227 %}
5228 
5229 
5230 //---------- Population Count Instructions -------------------------------------
5231 
5232 instruct popCountI(rRegI dst, rRegI src, eFlagsReg cr) %{
5233   predicate(UsePopCountInstruction);
5234   match(Set dst (PopCountI src));
5235   effect(KILL cr);
5236 
5237   format %{ "POPCNT $dst, $src" %}
5238   ins_encode %{
5239     __ popcntl($dst$$Register, $src$$Register);
5240   %}
5241   ins_pipe(ialu_reg);
5242 %}
5243 
5244 instruct popCountI_mem(rRegI dst, memory mem, eFlagsReg cr) %{
5245   predicate(UsePopCountInstruction);
5246   match(Set dst (PopCountI (LoadI mem)));
5247   effect(KILL cr);
5248 
5249   format %{ "POPCNT $dst, $mem" %}
5250   ins_encode %{
5251     __ popcntl($dst$$Register, $mem$$Address);
5252   %}
5253   ins_pipe(ialu_reg);
5254 %}
5255 
5256 // Note: Long.bitCount(long) returns an int.
5257 instruct popCountL(rRegI dst, eRegL src, rRegI tmp, eFlagsReg cr) %{
5258   predicate(UsePopCountInstruction);
5259   match(Set dst (PopCountL src));
5260   effect(KILL cr, TEMP tmp, TEMP dst);
5261 
5262   format %{ "POPCNT $dst, $src.lo\n\t"
5263             "POPCNT $tmp, $src.hi\n\t"
5264             "ADD    $dst, $tmp" %}
5265   ins_encode %{
5266     __ popcntl($dst$$Register, $src$$Register);
5267     __ popcntl($tmp$$Register, HIGH_FROM_LOW($src$$Register));
5268     __ addl($dst$$Register, $tmp$$Register);
5269   %}
5270   ins_pipe(ialu_reg);
5271 %}
5272 
5273 // Note: Long.bitCount(long) returns an int.
5274 instruct popCountL_mem(rRegI dst, memory mem, rRegI tmp, eFlagsReg cr) %{
5275   predicate(UsePopCountInstruction);
5276   match(Set dst (PopCountL (LoadL mem)));
5277   effect(KILL cr, TEMP tmp, TEMP dst);
5278 
5279   format %{ "POPCNT $dst, $mem\n\t"
5280             "POPCNT $tmp, $mem+4\n\t"
5281             "ADD    $dst, $tmp" %}
5282   ins_encode %{
5283     //__ popcntl($dst$$Register, $mem$$Address$$first);
5284     //__ popcntl($tmp$$Register, $mem$$Address$$second);
5285     __ popcntl($dst$$Register, Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none));
5286     __ popcntl($tmp$$Register, Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none));
5287     __ addl($dst$$Register, $tmp$$Register);
5288   %}
5289   ins_pipe(ialu_reg);
5290 %}
5291 
5292 
5293 //----------Load/Store/Move Instructions---------------------------------------
5294 //----------Load Instructions--------------------------------------------------
5295 // Load Byte (8bit signed)
5296 instruct loadB(xRegI dst, memory mem) %{
5297   match(Set dst (LoadB mem));
5298 
5299   ins_cost(125);
5300   format %{ "MOVSX8 $dst,$mem\t# byte" %}
5301 
5302   ins_encode %{
5303     __ movsbl($dst$$Register, $mem$$Address);
5304   %}
5305 
5306   ins_pipe(ialu_reg_mem);
5307 %}
5308 
5309 // Load Byte (8bit signed) into Long Register
5310 instruct loadB2L(eRegL dst, memory mem, eFlagsReg cr) %{
5311   match(Set dst (ConvI2L (LoadB mem)));
5312   effect(KILL cr);
5313 
5314   ins_cost(375);
5315   format %{ "MOVSX8 $dst.lo,$mem\t# byte -> long\n\t"
5316             "MOV    $dst.hi,$dst.lo\n\t"
5317             "SAR    $dst.hi,7" %}
5318 
5319   ins_encode %{
5320     __ movsbl($dst$$Register, $mem$$Address);
5321     __ movl(HIGH_FROM_LOW($dst$$Register), $dst$$Register); // This is always a different register.
    __ sarl(HIGH_FROM_LOW($dst$$Register), 7); // the upper 25 bits (24+1) are already sign-extended.
5323   %}
5324 
5325   ins_pipe(ialu_reg_mem);
5326 %}
5327 
5328 // Load Unsigned Byte (8bit UNsigned)
5329 instruct loadUB(xRegI dst, memory mem) %{
5330   match(Set dst (LoadUB mem));
5331 
5332   ins_cost(125);
5333   format %{ "MOVZX8 $dst,$mem\t# ubyte -> int" %}
5334 
5335   ins_encode %{
5336     __ movzbl($dst$$Register, $mem$$Address);
5337   %}
5338 
5339   ins_pipe(ialu_reg_mem);
5340 %}
5341 
5342 // Load Unsigned Byte (8 bit UNsigned) into Long Register
5343 instruct loadUB2L(eRegL dst, memory mem, eFlagsReg cr) %{
5344   match(Set dst (ConvI2L (LoadUB mem)));
5345   effect(KILL cr);
5346 
5347   ins_cost(250);
5348   format %{ "MOVZX8 $dst.lo,$mem\t# ubyte -> long\n\t"
5349             "XOR    $dst.hi,$dst.hi" %}
5350 
5351   ins_encode %{
5352     Register Rdst = $dst$$Register;
5353     __ movzbl(Rdst, $mem$$Address);
5354     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5355   %}
5356 
5357   ins_pipe(ialu_reg_mem);
5358 %}
5359 
5360 // Load Unsigned Byte (8 bit UNsigned) with mask into Long Register
5361 instruct loadUB2L_immI8(eRegL dst, memory mem, immI8 mask, eFlagsReg cr) %{
5362   match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
5363   effect(KILL cr);
5364 
5365   format %{ "MOVZX8 $dst.lo,$mem\t# ubyte & 8-bit mask -> long\n\t"
5366             "XOR    $dst.hi,$dst.hi\n\t"
5367             "AND    $dst.lo,$mask" %}
5368   ins_encode %{
5369     Register Rdst = $dst$$Register;
5370     __ movzbl(Rdst, $mem$$Address);
5371     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5372     __ andl(Rdst, $mask$$constant);
5373   %}
5374   ins_pipe(ialu_reg_mem);
5375 %}
5376 
5377 // Load Short (16bit signed)
5378 instruct loadS(rRegI dst, memory mem) %{
5379   match(Set dst (LoadS mem));
5380 
5381   ins_cost(125);
5382   format %{ "MOVSX  $dst,$mem\t# short" %}
5383 
5384   ins_encode %{
5385     __ movswl($dst$$Register, $mem$$Address);
5386   %}
5387 
5388   ins_pipe(ialu_reg_mem);
5389 %}
5390 
5391 // Load Short (16 bit signed) to Byte (8 bit signed)
5392 instruct loadS2B(rRegI dst, memory mem, immI_24 twentyfour) %{
5393   match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
5394 
5395   ins_cost(125);
5396   format %{ "MOVSX  $dst, $mem\t# short -> byte" %}
5397   ins_encode %{
5398     __ movsbl($dst$$Register, $mem$$Address);
5399   %}
5400   ins_pipe(ialu_reg_mem);
5401 %}
5402 
5403 // Load Short (16bit signed) into Long Register
5404 instruct loadS2L(eRegL dst, memory mem, eFlagsReg cr) %{
5405   match(Set dst (ConvI2L (LoadS mem)));
5406   effect(KILL cr);
5407 
5408   ins_cost(375);
5409   format %{ "MOVSX  $dst.lo,$mem\t# short -> long\n\t"
5410             "MOV    $dst.hi,$dst.lo\n\t"
5411             "SAR    $dst.hi,15" %}
5412 
5413   ins_encode %{
5414     __ movswl($dst$$Register, $mem$$Address);
5415     __ movl(HIGH_FROM_LOW($dst$$Register), $dst$$Register); // This is always a different register.
    __ sarl(HIGH_FROM_LOW($dst$$Register), 15); // the upper 17 bits (16+1) are already sign-extended.
5417   %}
5418 
5419   ins_pipe(ialu_reg_mem);
5420 %}
5421 
5422 // Load Unsigned Short/Char (16bit unsigned)
5423 instruct loadUS(rRegI dst, memory mem) %{
5424   match(Set dst (LoadUS mem));
5425 
5426   ins_cost(125);
5427   format %{ "MOVZX  $dst,$mem\t# ushort/char -> int" %}
5428 
5429   ins_encode %{
5430     __ movzwl($dst$$Register, $mem$$Address);
5431   %}
5432 
5433   ins_pipe(ialu_reg_mem);
5434 %}
5435 
5436 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
5437 instruct loadUS2B(rRegI dst, memory mem, immI_24 twentyfour) %{
5438   match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
5439 
5440   ins_cost(125);
5441   format %{ "MOVSX  $dst, $mem\t# ushort -> byte" %}
5442   ins_encode %{
5443     __ movsbl($dst$$Register, $mem$$Address);
5444   %}
5445   ins_pipe(ialu_reg_mem);
5446 %}
5447 
5448 // Load Unsigned Short/Char (16 bit UNsigned) into Long Register
5449 instruct loadUS2L(eRegL dst, memory mem, eFlagsReg cr) %{
5450   match(Set dst (ConvI2L (LoadUS mem)));
5451   effect(KILL cr);
5452 
5453   ins_cost(250);
5454   format %{ "MOVZX  $dst.lo,$mem\t# ushort/char -> long\n\t"
5455             "XOR    $dst.hi,$dst.hi" %}
5456 
5457   ins_encode %{
5458     __ movzwl($dst$$Register, $mem$$Address);
5459     __ xorl(HIGH_FROM_LOW($dst$$Register), HIGH_FROM_LOW($dst$$Register));
5460   %}
5461 
5462   ins_pipe(ialu_reg_mem);
5463 %}
5464 
5465 // Load Unsigned Short/Char (16 bit UNsigned) with mask 0xFF into Long Register
5466 instruct loadUS2L_immI_255(eRegL dst, memory mem, immI_255 mask, eFlagsReg cr) %{
5467   match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5468   effect(KILL cr);
5469 
5470   format %{ "MOVZX8 $dst.lo,$mem\t# ushort/char & 0xFF -> long\n\t"
5471             "XOR    $dst.hi,$dst.hi" %}
5472   ins_encode %{
5473     Register Rdst = $dst$$Register;
5474     __ movzbl(Rdst, $mem$$Address);
5475     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5476   %}
5477   ins_pipe(ialu_reg_mem);
5478 %}
5479 
5480 // Load Unsigned Short/Char (16 bit UNsigned) with a 16-bit mask into Long Register
5481 instruct loadUS2L_immI16(eRegL dst, memory mem, immI16 mask, eFlagsReg cr) %{
5482   match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5483   effect(KILL cr);
5484 
5485   format %{ "MOVZX  $dst.lo, $mem\t# ushort/char & 16-bit mask -> long\n\t"
5486             "XOR    $dst.hi,$dst.hi\n\t"
5487             "AND    $dst.lo,$mask" %}
5488   ins_encode %{
5489     Register Rdst = $dst$$Register;
5490     __ movzwl(Rdst, $mem$$Address);
5491     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5492     __ andl(Rdst, $mask$$constant);
5493   %}
5494   ins_pipe(ialu_reg_mem);
5495 %}
5496 
5497 // Load Integer
5498 instruct loadI(rRegI dst, memory mem) %{
5499   match(Set dst (LoadI mem));
5500 
5501   ins_cost(125);
5502   format %{ "MOV    $dst,$mem\t# int" %}
5503 
5504   ins_encode %{
5505     __ movl($dst$$Register, $mem$$Address);
5506   %}
5507 
5508   ins_pipe(ialu_reg_mem);
5509 %}
5510 
5511 // Load Integer (32 bit signed) to Byte (8 bit signed)
5512 instruct loadI2B(rRegI dst, memory mem, immI_24 twentyfour) %{
5513   match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5514 
5515   ins_cost(125);
5516   format %{ "MOVSX  $dst, $mem\t# int -> byte" %}
5517   ins_encode %{
5518     __ movsbl($dst$$Register, $mem$$Address);
5519   %}
5520   ins_pipe(ialu_reg_mem);
5521 %}
5522 
5523 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
5524 instruct loadI2UB(rRegI dst, memory mem, immI_255 mask) %{
5525   match(Set dst (AndI (LoadI mem) mask));
5526 
5527   ins_cost(125);
5528   format %{ "MOVZX  $dst, $mem\t# int -> ubyte" %}
5529   ins_encode %{
5530     __ movzbl($dst$$Register, $mem$$Address);
5531   %}
5532   ins_pipe(ialu_reg_mem);
5533 %}
5534 
5535 // Load Integer (32 bit signed) to Short (16 bit signed)
5536 instruct loadI2S(rRegI dst, memory mem, immI_16 sixteen) %{
5537   match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5538 
5539   ins_cost(125);
5540   format %{ "MOVSX  $dst, $mem\t# int -> short" %}
5541   ins_encode %{
5542     __ movswl($dst$$Register, $mem$$Address);
5543   %}
5544   ins_pipe(ialu_reg_mem);
5545 %}
5546 
5547 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
5548 instruct loadI2US(rRegI dst, memory mem, immI_65535 mask) %{
5549   match(Set dst (AndI (LoadI mem) mask));
5550 
5551   ins_cost(125);
5552   format %{ "MOVZX  $dst, $mem\t# int -> ushort/char" %}
5553   ins_encode %{
5554     __ movzwl($dst$$Register, $mem$$Address);
5555   %}
5556   ins_pipe(ialu_reg_mem);
5557 %}
5558 
5559 // Load Integer into Long Register
5560 instruct loadI2L(eRegL dst, memory mem, eFlagsReg cr) %{
5561   match(Set dst (ConvI2L (LoadI mem)));
5562   effect(KILL cr);
5563 
5564   ins_cost(375);
5565   format %{ "MOV    $dst.lo,$mem\t# int -> long\n\t"
5566             "MOV    $dst.hi,$dst.lo\n\t"
5567             "SAR    $dst.hi,31" %}
5568 
5569   ins_encode %{
5570     __ movl($dst$$Register, $mem$$Address);
5571     __ movl(HIGH_FROM_LOW($dst$$Register), $dst$$Register); // This is always a different register.
5572     __ sarl(HIGH_FROM_LOW($dst$$Register), 31);
5573   %}
5574 
5575   ins_pipe(ialu_reg_mem);
5576 %}
5577 
5578 // Load Integer with mask 0xFF into Long Register
5579 instruct loadI2L_immI_255(eRegL dst, memory mem, immI_255 mask, eFlagsReg cr) %{
5580   match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5581   effect(KILL cr);
5582 
5583   format %{ "MOVZX8 $dst.lo,$mem\t# int & 0xFF -> long\n\t"
5584             "XOR    $dst.hi,$dst.hi" %}
5585   ins_encode %{
5586     Register Rdst = $dst$$Register;
5587     __ movzbl(Rdst, $mem$$Address);
5588     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5589   %}
5590   ins_pipe(ialu_reg_mem);
5591 %}
5592 
5593 // Load Integer with mask 0xFFFF into Long Register
5594 instruct loadI2L_immI_65535(eRegL dst, memory mem, immI_65535 mask, eFlagsReg cr) %{
5595   match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5596   effect(KILL cr);
5597 
5598   format %{ "MOVZX  $dst.lo,$mem\t# int & 0xFFFF -> long\n\t"
5599             "XOR    $dst.hi,$dst.hi" %}
5600   ins_encode %{
5601     Register Rdst = $dst$$Register;
5602     __ movzwl(Rdst, $mem$$Address);
5603     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5604   %}
5605   ins_pipe(ialu_reg_mem);
5606 %}
5607 
5608 // Load Integer with 31-bit mask into Long Register
5609 instruct loadI2L_immU31(eRegL dst, memory mem, immU31 mask, eFlagsReg cr) %{
5610   match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5611   effect(KILL cr);
5612 
5613   format %{ "MOV    $dst.lo,$mem\t# int & 31-bit mask -> long\n\t"
5614             "XOR    $dst.hi,$dst.hi\n\t"
5615             "AND    $dst.lo,$mask" %}
5616   ins_encode %{
5617     Register Rdst = $dst$$Register;
5618     __ movl(Rdst, $mem$$Address);
5619     __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
5620     __ andl(Rdst, $mask$$constant);
5621   %}
5622   ins_pipe(ialu_reg_mem);
5623 %}
5624 
5625 // Load Unsigned Integer into Long Register
5626 instruct loadUI2L(eRegL dst, memory mem, immL_32bits mask, eFlagsReg cr) %{
5627   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
5628   effect(KILL cr);
5629 
5630   ins_cost(250);
5631   format %{ "MOV    $dst.lo,$mem\t# uint -> long\n\t"
5632             "XOR    $dst.hi,$dst.hi" %}
5633 
5634   ins_encode %{
5635     __ movl($dst$$Register, $mem$$Address);
5636     __ xorl(HIGH_FROM_LOW($dst$$Register), HIGH_FROM_LOW($dst$$Register));
5637   %}
5638 
5639   ins_pipe(ialu_reg_mem);
5640 %}
5641 
5642 // Load Long.  Cannot clobber address while loading, so restrict address
5643 // register to ESI
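// The restriction matters because both loads use the same base register: if
// the destination's low half could alias the base, the first MOV would clobber
// the address before the +4 load, e.g. (illustrative registers only):
//   __ movl(rax, Address(rax, 0));  // low word overwrites the base
//   __ movl(rdx, Address(rax, 4));  // second load now uses the wrong address
// Forcing the address to be ESI-based (the load_long_memory operand) keeps the
// base disjoint from the destination pair, so both loads see the original address.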
5644 instruct loadL(eRegL dst, load_long_memory mem) %{
5645   predicate(!((LoadLNode*)n)->require_atomic_access());
5646   match(Set dst (LoadL mem));
5647 
5648   ins_cost(250);
5649   format %{ "MOV    $dst.lo,$mem\t# long\n\t"
5650             "MOV    $dst.hi,$mem+4" %}
5651 
5652   ins_encode %{
5653     Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
5654     Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);
5655     __ movl($dst$$Register, Amemlo);
5656     __ movl(HIGH_FROM_LOW($dst$$Register), Amemhi);
5657   %}
5658 
5659   ins_pipe(ialu_reg_long_mem);
5660 %}
5661 
5662 // Volatile Load Long.  Must be atomic, so do 64-bit FILD
5663 // then store it down to the stack and reload on the int
5664 // side.
5665 instruct loadL_volatile(stackSlotL dst, memory mem) %{
5666   predicate(UseSSE<=1 && ((LoadLNode*)n)->require_atomic_access());
5667   match(Set dst (LoadL mem));
5668 
5669   ins_cost(200);
5670   format %{ "FILD   $mem\t# Atomic volatile long load\n\t"
5671             "FISTp  $dst" %}
5672   ins_encode(enc_loadL_volatile(mem,dst));
5673   ins_pipe( fpu_reg_mem );
5674 %}
5675 
5676 instruct loadLX_volatile(stackSlotL dst, memory mem, regD tmp) %{
5677   predicate(UseSSE>=2 && ((LoadLNode*)n)->require_atomic_access());
5678   match(Set dst (LoadL mem));
5679   effect(TEMP tmp);
5680   ins_cost(180);
5681   format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
5682             "MOVSD  $dst,$tmp" %}
5683   ins_encode %{
5684     __ movdbl($tmp$$XMMRegister, $mem$$Address);
5685     __ movdbl(Address(rsp, $dst$$disp), $tmp$$XMMRegister);
5686   %}
5687   ins_pipe( pipe_slow );
5688 %}
5689 
5690 instruct loadLX_reg_volatile(eRegL dst, memory mem, regD tmp) %{
5691   predicate(UseSSE>=2 && ((LoadLNode*)n)->require_atomic_access());
5692   match(Set dst (LoadL mem));
5693   effect(TEMP tmp);
5694   ins_cost(160);
5695   format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
5696             "MOVD   $dst.lo,$tmp\n\t"
5697             "PSRLQ  $tmp,32\n\t"
5698             "MOVD   $dst.hi,$tmp" %}
5699   ins_encode %{
5700     __ movdbl($tmp$$XMMRegister, $mem$$Address);
5701     __ movdl($dst$$Register, $tmp$$XMMRegister);
5702     __ psrlq($tmp$$XMMRegister, 32);
5703     __ movdl(HIGH_FROM_LOW($dst$$Register), $tmp$$XMMRegister);
5704   %}
5705   ins_pipe( pipe_slow );
5706 %}
5707 
5708 // Load Range
5709 instruct loadRange(rRegI dst, memory mem) %{
5710   match(Set dst (LoadRange mem));
5711 
5712   ins_cost(125);
5713   format %{ "MOV    $dst,$mem" %}
5714   opcode(0x8B);
5715   ins_encode( OpcP, RegMem(dst,mem));
5716   ins_pipe( ialu_reg_mem );
5717 %}
5718 
5719 
5720 // Load Pointer
5721 instruct loadP(eRegP dst, memory mem) %{
5722   match(Set dst (LoadP mem));
5723 
5724   ins_cost(125);
5725   format %{ "MOV    $dst,$mem" %}
5726   opcode(0x8B);
5727   ins_encode( OpcP, RegMem(dst,mem));
5728   ins_pipe( ialu_reg_mem );
5729 %}
5730 
5731 // Load Klass Pointer
5732 instruct loadKlass(eRegP dst, memory mem) %{
5733   match(Set dst (LoadKlass mem));
5734 
5735   ins_cost(125);
5736   format %{ "MOV    $dst,$mem" %}
5737   opcode(0x8B);
5738   ins_encode( OpcP, RegMem(dst,mem));
5739   ins_pipe( ialu_reg_mem );
5740 %}
5741 
5742 // Load Double
5743 instruct loadDPR(regDPR dst, memory mem) %{
5744   predicate(UseSSE<=1);
5745   match(Set dst (LoadD mem));
5746 
5747   ins_cost(150);
5748   format %{ "FLD_D  ST,$mem\n\t"
5749             "FSTP   $dst" %}
5750   opcode(0xDD);               /* DD /0 */
5751   ins_encode( OpcP, RMopc_Mem(0x00,mem),
5752               Pop_Reg_DPR(dst) );
5753   ins_pipe( fpu_reg_mem );
5754 %}
5755 
5756 // Load Double to XMM
5757 instruct loadD(regD dst, memory mem) %{
5758   predicate(UseSSE>=2 && UseXmmLoadAndClearUpper);
5759   match(Set dst (LoadD mem));
5760   ins_cost(145);
5761   format %{ "MOVSD  $dst,$mem" %}
5762   ins_encode %{
5763     __ movdbl ($dst$$XMMRegister, $mem$$Address);
5764   %}
5765   ins_pipe( pipe_slow );
5766 %}
5767 
5768 instruct loadD_partial(regD dst, memory mem) %{
5769   predicate(UseSSE>=2 && !UseXmmLoadAndClearUpper);
5770   match(Set dst (LoadD mem));
5771   ins_cost(145);
5772   format %{ "MOVLPD $dst,$mem" %}
5773   ins_encode %{
5774     __ movdbl ($dst$$XMMRegister, $mem$$Address);
5775   %}
5776   ins_pipe( pipe_slow );
5777 %}
5778 
5779 // Load to XMM register (single-precision floating point)
5780 // MOVSS instruction
5781 instruct loadF(regF dst, memory mem) %{
5782   predicate(UseSSE>=1);
5783   match(Set dst (LoadF mem));
5784   ins_cost(145);
5785   format %{ "MOVSS  $dst,$mem" %}
5786   ins_encode %{
5787     __ movflt ($dst$$XMMRegister, $mem$$Address);
5788   %}
5789   ins_pipe( pipe_slow );
5790 %}
5791 
5792 // Load Float
5793 instruct loadFPR(regFPR dst, memory mem) %{
5794   predicate(UseSSE==0);
5795   match(Set dst (LoadF mem));
5796 
5797   ins_cost(150);
5798   format %{ "FLD_S  ST,$mem\n\t"
5799             "FSTP   $dst" %}
5800   opcode(0xD9);               /* D9 /0 */
5801   ins_encode( OpcP, RMopc_Mem(0x00,mem),
5802               Pop_Reg_FPR(dst) );
5803   ins_pipe( fpu_reg_mem );
5804 %}
5805 
5806 // Load Effective Address
5807 instruct leaP8(eRegP dst, indOffset8 mem) %{
5808   match(Set dst mem);
5809 
5810   ins_cost(110);
5811   format %{ "LEA    $dst,$mem" %}
5812   opcode(0x8D);
5813   ins_encode( OpcP, RegMem(dst,mem));
5814   ins_pipe( ialu_reg_reg_fat );
5815 %}
5816 
5817 instruct leaP32(eRegP dst, indOffset32 mem) %{
5818   match(Set dst mem);
5819 
5820   ins_cost(110);
5821   format %{ "LEA    $dst,$mem" %}
5822   opcode(0x8D);
5823   ins_encode( OpcP, RegMem(dst,mem));
5824   ins_pipe( ialu_reg_reg_fat );
5825 %}
5826 
5827 instruct leaPIdxOff(eRegP dst, indIndexOffset mem) %{
5828   match(Set dst mem);
5829 
5830   ins_cost(110);
5831   format %{ "LEA    $dst,$mem" %}
5832   opcode(0x8D);
5833   ins_encode( OpcP, RegMem(dst,mem));
5834   ins_pipe( ialu_reg_reg_fat );
5835 %}
5836 
5837 instruct leaPIdxScale(eRegP dst, indIndexScale mem) %{
5838   match(Set dst mem);
5839 
5840   ins_cost(110);
5841   format %{ "LEA    $dst,$mem" %}
5842   opcode(0x8D);
5843   ins_encode( OpcP, RegMem(dst,mem));
5844   ins_pipe( ialu_reg_reg_fat );
5845 %}
5846 
5847 instruct leaPIdxScaleOff(eRegP dst, indIndexScaleOffset mem) %{
5848   match(Set dst mem);
5849 
5850   ins_cost(110);
5851   format %{ "LEA    $dst,$mem" %}
5852   opcode(0x8D);
5853   ins_encode( OpcP, RegMem(dst,mem));
5854   ins_pipe( ialu_reg_reg_fat );
5855 %}
5856 
5857 // Load Constant
5858 instruct loadConI(rRegI dst, immI src) %{
5859   match(Set dst src);
5860 
5861   format %{ "MOV    $dst,$src" %}
5862   ins_encode( LdImmI(dst, src) );
5863   ins_pipe( ialu_reg_fat );
5864 %}
5865 
5866 // Load Constant zero
5867 instruct loadConI0(rRegI dst, immI0 src, eFlagsReg cr) %{
5868   match(Set dst src);
5869   effect(KILL cr);
5870 
5871   ins_cost(50);
5872   format %{ "XOR    $dst,$dst" %}
5873   opcode(0x33);  /* + rd */
5874   ins_encode( OpcP, RegReg( dst, dst ) );
5875   ins_pipe( ialu_reg );
5876 %}
5877 
5878 instruct loadConP(eRegP dst, immP src) %{
5879   match(Set dst src);
5880 
5881   format %{ "MOV    $dst,$src" %}
5882   opcode(0xB8);  /* + rd */
5883   ins_encode( LdImmP(dst, src) );
5884   ins_pipe( ialu_reg_fat );
5885 %}
5886 
5887 instruct loadConL(eRegL dst, immL src, eFlagsReg cr) %{
5888   match(Set dst src);
5889   effect(KILL cr);
5890   ins_cost(200);
5891   format %{ "MOV    $dst.lo,$src.lo\n\t"
5892             "MOV    $dst.hi,$src.hi" %}
5893   opcode(0xB8);
5894   ins_encode( LdImmL_Lo(dst, src), LdImmL_Hi(dst, src) );
5895   ins_pipe( ialu_reg_long_fat );
5896 %}
5897 
5898 instruct loadConL0(eRegL dst, immL0 src, eFlagsReg cr) %{
5899   match(Set dst src);
5900   effect(KILL cr);
5901   ins_cost(150);
5902   format %{ "XOR    $dst.lo,$dst.lo\n\t"
5903             "XOR    $dst.hi,$dst.hi" %}
5904   opcode(0x33,0x33);
5905   ins_encode( RegReg_Lo(dst,dst), RegReg_Hi(dst, dst) );
5906   ins_pipe( ialu_reg_long );
5907 %}
5908 
5909 // The instruction usage is guarded by predicate in operand immFPR().
5910 instruct loadConFPR(regFPR dst, immFPR con) %{
5911   match(Set dst con);
5912   ins_cost(125);
5913   format %{ "FLD_S  ST,[$constantaddress]\t# load from constant table: float=$con\n\t"
5914             "FSTP   $dst" %}
5915   ins_encode %{
5916     __ fld_s($constantaddress($con));
5917     __ fstp_d($dst$$reg);
5918   %}
5919   ins_pipe(fpu_reg_con);
5920 %}
5921 
5922 // The instruction usage is guarded by predicate in operand immFPR0().
5923 instruct loadConFPR0(regFPR dst, immFPR0 con) %{
5924   match(Set dst con);
5925   ins_cost(125);
5926   format %{ "FLDZ   ST\n\t"
5927             "FSTP   $dst" %}
5928   ins_encode %{
5929     __ fldz();
5930     __ fstp_d($dst$$reg);
5931   %}
5932   ins_pipe(fpu_reg_con);
5933 %}
5934 
5935 // The instruction usage is guarded by predicate in operand immFPR1().
5936 instruct loadConFPR1(regFPR dst, immFPR1 con) %{
5937   match(Set dst con);
5938   ins_cost(125);
5939   format %{ "FLD1   ST\n\t"
5940             "FSTP   $dst" %}
5941   ins_encode %{
5942     __ fld1();
5943     __ fstp_d($dst$$reg);
5944   %}
5945   ins_pipe(fpu_reg_con);
5946 %}
5947 
5948 // The instruction usage is guarded by predicate in operand immF().
5949 instruct loadConF(regF dst, immF con) %{
5950   match(Set dst con);
5951   ins_cost(125);
5952   format %{ "MOVSS  $dst,[$constantaddress]\t# load from constant table: float=$con" %}
5953   ins_encode %{
5954     __ movflt($dst$$XMMRegister, $constantaddress($con));
5955   %}
5956   ins_pipe(pipe_slow);
5957 %}
5958 
5959 // The instruction usage is guarded by predicate in operand immF0().
5960 instruct loadConF0(regF dst, immF0 src) %{
5961   match(Set dst src);
5962   ins_cost(100);
5963   format %{ "XORPS  $dst,$dst\t# float 0.0" %}
5964   ins_encode %{
5965     __ xorps($dst$$XMMRegister, $dst$$XMMRegister);
5966   %}
5967   ins_pipe(pipe_slow);
5968 %}
5969 
5970 // The instruction usage is guarded by predicate in operand immDPR().
5971 instruct loadConDPR(regDPR dst, immDPR con) %{
5972   match(Set dst con);
5973   ins_cost(125);
5974 
5975   format %{ "FLD_D  ST,[$constantaddress]\t# load from constant table: double=$con\n\t"
5976             "FSTP   $dst" %}
5977   ins_encode %{
5978     __ fld_d($constantaddress($con));
5979     __ fstp_d($dst$$reg);
5980   %}
5981   ins_pipe(fpu_reg_con);
5982 %}
5983 
5984 // The instruction usage is guarded by predicate in operand immDPR0().
5985 instruct loadConDPR0(regDPR dst, immDPR0 con) %{
5986   match(Set dst con);
5987   ins_cost(125);
5988 
5989   format %{ "FLDZ   ST\n\t"
5990             "FSTP   $dst" %}
5991   ins_encode %{
5992     __ fldz();
5993     __ fstp_d($dst$$reg);
5994   %}
5995   ins_pipe(fpu_reg_con);
5996 %}
5997 
5998 // The instruction usage is guarded by predicate in operand immDPR1().
5999 instruct loadConDPR1(regDPR dst, immDPR1 con) %{
6000   match(Set dst con);
6001   ins_cost(125);
6002 
6003   format %{ "FLD1   ST\n\t"
6004             "FSTP   $dst" %}
6005   ins_encode %{
6006     __ fld1();
6007     __ fstp_d($dst$$reg);
6008   %}
6009   ins_pipe(fpu_reg_con);
6010 %}
6011 
6012 // The instruction usage is guarded by predicate in operand immD().
6013 instruct loadConD(regD dst, immD con) %{
6014   match(Set dst con);
6015   ins_cost(125);
6016   format %{ "MOVSD  $dst,[$constantaddress]\t# load from constant table: double=$con" %}
6017   ins_encode %{
6018     __ movdbl($dst$$XMMRegister, $constantaddress($con));
6019   %}
6020   ins_pipe(pipe_slow);
6021 %}
6022 
6023 // The instruction usage is guarded by predicate in operand immD0().
6024 instruct loadConD0(regD dst, immD0 src) %{
6025   match(Set dst src);
6026   ins_cost(100);
6027   format %{ "XORPD  $dst,$dst\t# double 0.0" %}
6028   ins_encode %{
6029     __ xorpd ($dst$$XMMRegister, $dst$$XMMRegister);
6030   %}
6031   ins_pipe( pipe_slow );
6032 %}
6033 
6034 // Load Stack Slot
6035 instruct loadSSI(rRegI dst, stackSlotI src) %{
6036   match(Set dst src);
6037   ins_cost(125);
6038 
6039   format %{ "MOV    $dst,$src" %}
6040   opcode(0x8B);
6041   ins_encode( OpcP, RegMem(dst,src));
6042   ins_pipe( ialu_reg_mem );
6043 %}
6044 
6045 instruct loadSSL(eRegL dst, stackSlotL src) %{
6046   match(Set dst src);
6047 
6048   ins_cost(200);
6049   format %{ "MOV    $dst,$src.lo\n\t"
6050             "MOV    $dst+4,$src.hi" %}
6051   opcode(0x8B, 0x8B);
6052   ins_encode( OpcP, RegMem( dst, src ), OpcS, RegMem_Hi( dst, src ) );
6053   ins_pipe( ialu_mem_long_reg );
6054 %}
6055 
6056 // Load Stack Slot
6057 instruct loadSSP(eRegP dst, stackSlotP src) %{
6058   match(Set dst src);
6059   ins_cost(125);
6060 
6061   format %{ "MOV    $dst,$src" %}
6062   opcode(0x8B);
6063   ins_encode( OpcP, RegMem(dst,src));
6064   ins_pipe( ialu_reg_mem );
6065 %}
6066 
6067 // Load Stack Slot
6068 instruct loadSSF(regFPR dst, stackSlotF src) %{
6069   match(Set dst src);
6070   ins_cost(125);
6071 
6072   format %{ "FLD_S  $src\n\t"
6073             "FSTP   $dst" %}
6074   opcode(0xD9);               /* D9 /0, FLD m32real */
6075   ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
6076               Pop_Reg_FPR(dst) );
6077   ins_pipe( fpu_reg_mem );
6078 %}
6079 
6080 // Load Stack Slot
6081 instruct loadSSD(regDPR dst, stackSlotD src) %{
6082   match(Set dst src);
6083   ins_cost(125);
6084 
6085   format %{ "FLD_D  $src\n\t"
6086             "FSTP   $dst" %}
6087   opcode(0xDD);               /* DD /0, FLD m64real */
6088   ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
6089               Pop_Reg_DPR(dst) );
6090   ins_pipe( fpu_reg_mem );
6091 %}
6092 
6093 // Prefetch instructions for allocation.
6094 // Must be safe to execute with invalid address (cannot fault).
6095 
6096 instruct prefetchAlloc0( memory mem ) %{
6097   predicate(UseSSE==0 && AllocatePrefetchInstr!=3);
6098   match(PrefetchAllocation mem);
6099   ins_cost(0);
6100   size(0);
6101   format %{ "Prefetch allocation (non-SSE is empty encoding)" %}
6102   ins_encode();
6103   ins_pipe(empty);
6104 %}
6105 
6106 instruct prefetchAlloc( memory mem ) %{
6107   predicate(AllocatePrefetchInstr==3);
6108   match( PrefetchAllocation mem );
6109   ins_cost(100);
6110 
6111   format %{ "PREFETCHW $mem\t! Prefetch allocation into L1 cache and mark modified" %}
6112   ins_encode %{
6113     __ prefetchw($mem$$Address);
6114   %}
6115   ins_pipe(ialu_mem);
6116 %}
6117 
6118 instruct prefetchAllocNTA( memory mem ) %{
6119   predicate(UseSSE>=1 && AllocatePrefetchInstr==0);
6120   match(PrefetchAllocation mem);
6121   ins_cost(100);
6122 
6123   format %{ "PREFETCHNTA $mem\t! Prefetch allocation into non-temporal cache for write" %}
6124   ins_encode %{
6125     __ prefetchnta($mem$$Address);
6126   %}
6127   ins_pipe(ialu_mem);
6128 %}
6129 
6130 instruct prefetchAllocT0( memory mem ) %{
6131   predicate(UseSSE>=1 && AllocatePrefetchInstr==1);
6132   match(PrefetchAllocation mem);
6133   ins_cost(100);
6134 
6135   format %{ "PREFETCHT0 $mem\t! Prefetch allocation into L1 and L2 caches for write" %}
6136   ins_encode %{
6137     __ prefetcht0($mem$$Address);
6138   %}
6139   ins_pipe(ialu_mem);
6140 %}
6141 
6142 instruct prefetchAllocT2( memory mem ) %{
6143   predicate(UseSSE>=1 && AllocatePrefetchInstr==2);
6144   match(PrefetchAllocation mem);
6145   ins_cost(100);
6146 
6147   format %{ "PREFETCHT2 $mem\t! Prefetch allocation into L2 cache for write" %}
6148   ins_encode %{
6149     __ prefetcht2($mem$$Address);
6150   %}
6151   ins_pipe(ialu_mem);
6152 %}
6153 
6154 //----------Store Instructions-------------------------------------------------
6155 
6156 // Store Byte
6157 instruct storeB(memory mem, xRegI src) %{
6158   match(Set mem (StoreB mem src));
6159 
6160   ins_cost(125);
6161   format %{ "MOV8   $mem,$src" %}
6162   opcode(0x88);
6163   ins_encode( OpcP, RegMem( src, mem ) );
6164   ins_pipe( ialu_mem_reg );
6165 %}
6166 
6167 // Store Char/Short
6168 instruct storeC(memory mem, rRegI src) %{
6169   match(Set mem (StoreC mem src));
6170 
6171   ins_cost(125);
6172   format %{ "MOV16  $mem,$src" %}
6173   opcode(0x89, 0x66);
6174   ins_encode( OpcS, OpcP, RegMem( src, mem ) );
6175   ins_pipe( ialu_mem_reg );
6176 %}
6177 
6178 // Store Integer
6179 instruct storeI(memory mem, rRegI src) %{
6180   match(Set mem (StoreI mem src));
6181 
6182   ins_cost(125);
6183   format %{ "MOV    $mem,$src" %}
6184   opcode(0x89);
6185   ins_encode( OpcP, RegMem( src, mem ) );
6186   ins_pipe( ialu_mem_reg );
6187 %}
6188 
6189 // Store Long
6190 instruct storeL(long_memory mem, eRegL src) %{
6191   predicate(!((StoreLNode*)n)->require_atomic_access());
6192   match(Set mem (StoreL mem src));
6193 
6194   ins_cost(200);
6195   format %{ "MOV    $mem,$src.lo\n\t"
6196             "MOV    $mem+4,$src.hi" %}
6197   opcode(0x89, 0x89);
6198   ins_encode( OpcP, RegMem( src, mem ), OpcS, RegMem_Hi( src, mem ) );
6199   ins_pipe( ialu_mem_long_reg );
6200 %}
6201 
6202 // Store Long to Integer
6203 instruct storeL2I(memory mem, eRegL src) %{
6204   match(Set mem (StoreI mem (ConvL2I src)));
6205 
6206   format %{ "MOV    $mem,$src.lo\t# long -> int" %}
6207   ins_encode %{
6208     __ movl($mem$$Address, $src$$Register);
6209   %}
6210   ins_pipe(ialu_mem_reg);
6211 %}
6212 
6213 // Volatile Store Long.  Must be atomic, so move it into
6214 // the FP TOS and then do a 64-bit FIST.  Has to probe the
6215 // target address before the store (for null-ptr checks)
6216 // so the memory operand is used twice in the encoding.
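// The probe is visible in the encodings below: the SSE2 variants emit
//   __ cmpl(rax, $mem$$Address);   // touches $mem first; faults here on null
// before any FP/XMM state is written, so the implicit null check is taken at a
// well-defined point and the 64-bit store itself remains a single atomic
// MOVSD/FIST.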
6217 instruct storeL_volatile(memory mem, stackSlotL src, eFlagsReg cr ) %{
6218   predicate(UseSSE<=1 && ((StoreLNode*)n)->require_atomic_access());
6219   match(Set mem (StoreL mem src));
6220   effect( KILL cr );
6221   ins_cost(400);
6222   format %{ "CMP    $mem,EAX\t# Probe address for implicit null check\n\t"
6223             "FILD   $src\n\t"
6224             "FISTp  $mem\t # 64-bit atomic volatile long store" %}
6225   opcode(0x3B);
6226   ins_encode( OpcP, RegMem( EAX, mem ), enc_storeL_volatile(mem,src));
6227   ins_pipe( fpu_reg_mem );
6228 %}
6229 
6230 instruct storeLX_volatile(memory mem, stackSlotL src, regD tmp, eFlagsReg cr) %{
6231   predicate(UseSSE>=2 && ((StoreLNode*)n)->require_atomic_access());
6232   match(Set mem (StoreL mem src));
6233   effect( TEMP tmp, KILL cr );
6234   ins_cost(380);
6235   format %{ "CMP    $mem,EAX\t# Probe address for implicit null check\n\t"
6236             "MOVSD  $tmp,$src\n\t"
6237             "MOVSD  $mem,$tmp\t # 64-bit atomic volatile long store" %}
6238   ins_encode %{
6239     __ cmpl(rax, $mem$$Address);
6240     __ movdbl($tmp$$XMMRegister, Address(rsp, $src$$disp));
6241     __ movdbl($mem$$Address, $tmp$$XMMRegister);
6242   %}
6243   ins_pipe( pipe_slow );
6244 %}
6245 
6246 instruct storeLX_reg_volatile(memory mem, eRegL src, regD tmp2, regD tmp, eFlagsReg cr) %{
6247   predicate(UseSSE>=2 && ((StoreLNode*)n)->require_atomic_access());
6248   match(Set mem (StoreL mem src));
6249   effect( TEMP tmp2 , TEMP tmp, KILL cr );
6250   ins_cost(360);
6251   format %{ "CMP    $mem,EAX\t# Probe address for implicit null check\n\t"
6252             "MOVD   $tmp,$src.lo\n\t"
6253             "MOVD   $tmp2,$src.hi\n\t"
6254             "PUNPCKLDQ $tmp,$tmp2\n\t"
6255             "MOVSD  $mem,$tmp\t # 64-bit atomic volatile long store" %}
6256   ins_encode %{
6257     __ cmpl(rax, $mem$$Address);
6258     __ movdl($tmp$$XMMRegister, $src$$Register);
6259     __ movdl($tmp2$$XMMRegister, HIGH_FROM_LOW($src$$Register));
6260     __ punpckldq($tmp$$XMMRegister, $tmp2$$XMMRegister);
6261     __ movdbl($mem$$Address, $tmp$$XMMRegister);
6262   %}
6263   ins_pipe( pipe_slow );
6264 %}
6265 
6266 // Store Pointer; for storing unknown oops and raw pointers
6267 instruct storeP(memory mem, anyRegP src) %{
6268   match(Set mem (StoreP mem src));
6269 
6270   ins_cost(125);
6271   format %{ "MOV    $mem,$src" %}
6272   opcode(0x89);
6273   ins_encode( OpcP, RegMem( src, mem ) );
6274   ins_pipe( ialu_mem_reg );
6275 %}
6276 
6277 // Store Integer Immediate
6278 instruct storeImmI(memory mem, immI src) %{
6279   match(Set mem (StoreI mem src));
6280 
6281   ins_cost(150);
6282   format %{ "MOV    $mem,$src" %}
6283   opcode(0xC7);               /* C7 /0 */
6284   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con32( src ));
6285   ins_pipe( ialu_mem_imm );
6286 %}
6287 
6288 // Store Short/Char Immediate
6289 instruct storeImmI16(memory mem, immI16 src) %{
6290   predicate(UseStoreImmI16);
6291   match(Set mem (StoreC mem src));
6292 
6293   ins_cost(150);
6294   format %{ "MOV16  $mem,$src" %}
6295   opcode(0xC7);     /* C7 /0 Same as 32 store immediate with prefix */
6296   ins_encode( SizePrefix, OpcP, RMopc_Mem(0x00,mem),  Con16( src ));
6297   ins_pipe( ialu_mem_imm );
6298 %}
6299 
6300 // Store Pointer Immediate; null pointers or constant oops that do not
6301 // need card-mark barriers.
6302 instruct storeImmP(memory mem, immP src) %{
6303   match(Set mem (StoreP mem src));
6304 
6305   ins_cost(150);
6306   format %{ "MOV    $mem,$src" %}
6307   opcode(0xC7);               /* C7 /0 */
6308   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con32( src ));
6309   ins_pipe( ialu_mem_imm );
6310 %}
6311 
6312 // Store Byte Immediate
6313 instruct storeImmB(memory mem, immI8 src) %{
6314   match(Set mem (StoreB mem src));
6315 
6316   ins_cost(150);
6317   format %{ "MOV8   $mem,$src" %}
6318   opcode(0xC6);               /* C6 /0 */
6319   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con8or32( src ));
6320   ins_pipe( ialu_mem_imm );
6321 %}
6322 
6323 // Store CMS card-mark Immediate
6324 instruct storeImmCM(memory mem, immI8 src) %{
6325   match(Set mem (StoreCM mem src));
6326 
6327   ins_cost(150);
6328   format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
6329   opcode(0xC6);               /* C6 /0 */
6330   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con8or32( src ));
6331   ins_pipe( ialu_mem_imm );
6332 %}
6333 
6334 // Store Double
6335 instruct storeDPR( memory mem, regDPR1 src) %{
6336   predicate(UseSSE<=1);
6337   match(Set mem (StoreD mem src));
6338 
6339   ins_cost(100);
6340   format %{ "FST_D  $mem,$src" %}
6341   opcode(0xDD);       /* DD /2 */
6342   ins_encode( enc_FPR_store(mem,src) );
6343   ins_pipe( fpu_mem_reg );
6344 %}
6345 
6346 // Store double does rounding on x86
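// With UseSSE<=1 the value lives in the 80-bit x87 stack; FST_D rounds the
// extended-precision value to 64 bits as it stores, so the RoundDouble form
// below needs no extra code beyond the plain store.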
6347 instruct storeDPR_rounded( memory mem, regDPR1 src) %{
6348   predicate(UseSSE<=1);
6349   match(Set mem (StoreD mem (RoundDouble src)));
6350 
6351   ins_cost(100);
6352   format %{ "FST_D  $mem,$src\t# round" %}
6353   opcode(0xDD);       /* DD /2 */
6354   ins_encode( enc_FPR_store(mem,src) );
6355   ins_pipe( fpu_mem_reg );
6356 %}
6357 
6358 // Store XMM register to memory (double-precision floating point)
6359 // MOVSD instruction
6360 instruct storeD(memory mem, regD src) %{
6361   predicate(UseSSE>=2);
6362   match(Set mem (StoreD mem src));
6363   ins_cost(95);
6364   format %{ "MOVSD  $mem,$src" %}
6365   ins_encode %{
6366     __ movdbl($mem$$Address, $src$$XMMRegister);
6367   %}
6368   ins_pipe( pipe_slow );
6369 %}
6370 
6371 // Store XMM register to memory (single-precision floating point)
6372 // MOVSS instruction
6373 instruct storeF(memory mem, regF src) %{
6374   predicate(UseSSE>=1);
6375   match(Set mem (StoreF mem src));
6376   ins_cost(95);
6377   format %{ "MOVSS  $mem,$src" %}
6378   ins_encode %{
6379     __ movflt($mem$$Address, $src$$XMMRegister);
6380   %}
6381   ins_pipe( pipe_slow );
6382 %}
6383 
6384 // Store Float
6385 instruct storeFPR( memory mem, regFPR1 src) %{
6386   predicate(UseSSE==0);
6387   match(Set mem (StoreF mem src));
6388 
6389   ins_cost(100);
6390   format %{ "FST_S  $mem,$src" %}
6391   opcode(0xD9);       /* D9 /2 */
6392   ins_encode( enc_FPR_store(mem,src) );
6393   ins_pipe( fpu_mem_reg );
6394 %}
6395 
6396 // Store Float does rounding on x86
6397 instruct storeFPR_rounded( memory mem, regFPR1 src) %{
6398   predicate(UseSSE==0);
6399   match(Set mem (StoreF mem (RoundFloat src)));
6400 
6401   ins_cost(100);
6402   format %{ "FST_S  $mem,$src\t# round" %}
6403   opcode(0xD9);       /* D9 /2 */
6404   ins_encode( enc_FPR_store(mem,src) );
6405   ins_pipe( fpu_mem_reg );
6406 %}
6407 
6408 // Store Float from a Double (ConvD2F); does rounding on x86
6409 instruct storeFPR_Drounded( memory mem, regDPR1 src) %{
6410   predicate(UseSSE<=1);
6411   match(Set mem (StoreF mem (ConvD2F src)));
6412 
6413   ins_cost(100);
6414   format %{ "FST_S  $mem,$src\t# D-round" %}
6415   opcode(0xD9);       /* D9 /2 */
6416   ins_encode( enc_FPR_store(mem,src) );
6417   ins_pipe( fpu_mem_reg );
6418 %}
6419 
6420 // Store immediate Float value (it is faster than store from FPU register)
6421 // The instruction usage is guarded by predicate in operand immFPR().
6422 instruct storeFPR_imm( memory mem, immFPR src) %{
6423   match(Set mem (StoreF mem src));
6424 
6425   ins_cost(50);
6426   format %{ "MOV    $mem,$src\t# store float" %}
6427   opcode(0xC7);               /* C7 /0 */
6428   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con32FPR_as_bits( src ));
6429   ins_pipe( ialu_mem_imm );
6430 %}
6431 
6432 // Store immediate Float value (it is faster than store from XMM register)
6433 // The instruction usage is guarded by predicate in operand immF().
6434 instruct storeF_imm( memory mem, immF src) %{
6435   match(Set mem (StoreF mem src));
6436 
6437   ins_cost(50);
6438   format %{ "MOV    $mem,$src\t# store float" %}
6439   opcode(0xC7);               /* C7 /0 */
6440   ins_encode( OpcP, RMopc_Mem(0x00,mem),  Con32F_as_bits( src ));
6441   ins_pipe( ialu_mem_imm );
6442 %}
6443 
6444 // Store Integer to stack slot
6445 instruct storeSSI(stackSlotI dst, rRegI src) %{
6446   match(Set dst src);
6447 
6448   ins_cost(100);
6449   format %{ "MOV    $dst,$src" %}
6450   opcode(0x89);
6451   ins_encode( OpcPRegSS( dst, src ) );
6452   ins_pipe( ialu_mem_reg );
6453 %}
6454 
6455 // Store Pointer to stack slot
6456 instruct storeSSP(stackSlotP dst, eRegP src) %{
6457   match(Set dst src);
6458 
6459   ins_cost(100);
6460   format %{ "MOV    $dst,$src" %}
6461   opcode(0x89);
6462   ins_encode( OpcPRegSS( dst, src ) );
6463   ins_pipe( ialu_mem_reg );
6464 %}
6465 
6466 // Store Long to stack slot
6467 instruct storeSSL(stackSlotL dst, eRegL src) %{
6468   match(Set dst src);
6469 
6470   ins_cost(200);
6471   format %{ "MOV    $dst,$src.lo\n\t"
6472             "MOV    $dst+4,$src.hi" %}
6473   opcode(0x89, 0x89);
6474   ins_encode( OpcP, RegMem( src, dst ), OpcS, RegMem_Hi( src, dst ) );
6475   ins_pipe( ialu_mem_long_reg );
6476 %}
6477 
6478 //----------MemBar Instructions-----------------------------------------------
6479 // Memory barrier flavors
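// On x86 the hardware ordering (TSO) already keeps loads ordered with older
// loads and stores ordered with older stores, so acquire, release and
// storestore barriers need no instructions and get empty encodings below.
// Only the StoreLoad ordering required by a volatile store needs a real
// fence (see membar_volatile).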
6480 
6481 instruct membar_acquire() %{
6482   match(MemBarAcquire);
6483   match(LoadFence);
6484   ins_cost(400);
6485 
6486   size(0);
6487   format %{ "MEMBAR-acquire ! (empty encoding)" %}
6488   ins_encode();
6489   ins_pipe(empty);
6490 %}
6491 
6492 instruct membar_acquire_lock() %{
6493   match(MemBarAcquireLock);
6494   ins_cost(0);
6495 
6496   size(0);
6497   format %{ "MEMBAR-acquire (prior CMPXCHG in FastLock so empty encoding)" %}
6498   ins_encode( );
6499   ins_pipe(empty);
6500 %}
6501 
6502 instruct membar_release() %{
6503   match(MemBarRelease);
6504   match(StoreFence);
6505   ins_cost(400);
6506 
6507   size(0);
6508   format %{ "MEMBAR-release ! (empty encoding)" %}
6509   ins_encode( );
6510   ins_pipe(empty);
6511 %}
6512 
6513 instruct membar_release_lock() %{
6514   match(MemBarReleaseLock);
6515   ins_cost(0);
6516 
6517   size(0);
6518   format %{ "MEMBAR-release (a FastUnlock follows so empty encoding)" %}
6519   ins_encode( );
6520   ins_pipe(empty);
6521 %}
6522 
6523 instruct membar_volatile(eFlagsReg cr) %{
6524   match(MemBarVolatile);
6525   effect(KILL cr);
6526   ins_cost(400);
6527 
6528   format %{ 
6529     $$template
6530     if (os::is_MP()) {
6531       $$emit$$"LOCK ADDL [ESP + #0], 0\t! membar_volatile"
6532     } else {
6533       $$emit$$"MEMBAR-volatile ! (empty encoding)"
6534     }
6535   %}
6536   ins_encode %{
6537     __ membar(Assembler::StoreLoad);
6538   %}
6539   ins_pipe(pipe_slow);
6540 %}
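// On MP systems the StoreLoad fence above is emitted as a locked ADD of zero
// to the word at the top of the stack rather than MFENCE: a LOCK'ed
// read-modify-write has full-fence memory-ordering semantics, changes
// nothing but the flags (hence KILL cr), and is typically cheaper than
// MFENCE on the processors this port targets.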
6541 
6542 instruct unnecessary_membar_volatile() %{
6543   match(MemBarVolatile);
6544   predicate(Matcher::post_store_load_barrier(n));
6545   ins_cost(0);
6546 
6547   size(0);
6548   format %{ "MEMBAR-volatile (unnecessary so empty encoding)" %}
6549   ins_encode( );
6550   ins_pipe(empty);
6551 %}
6552 
6553 instruct membar_storestore() %{
6554   match(MemBarStoreStore);
6555   ins_cost(0);
6556 
6557   size(0);
6558   format %{ "MEMBAR-storestore (empty encoding)" %}
6559   ins_encode( );
6560   ins_pipe(empty);
6561 %}
6562 
6563 //----------Move Instructions--------------------------------------------------
6564 instruct castX2P(eAXRegP dst, eAXRegI src) %{
6565   match(Set dst (CastX2P src));
6566   format %{ "# X2P  $dst, $src" %}
6567   ins_encode( /*empty encoding*/ );
6568   ins_cost(0);
6569   ins_pipe(empty);
6570 %}
6571 
6572 instruct castP2X(rRegI dst, eRegP src ) %{
6573   match(Set dst (CastP2X src));
6574   ins_cost(50);
6575   format %{ "MOV    $dst, $src\t# CastP2X" %}
6576   ins_encode( enc_Copy( dst, src) );
6577   ins_pipe( ialu_reg_reg );
6578 %}
6579 
6580 //----------Conditional Move---------------------------------------------------
6581 // Conditional move
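// On CPUs without CMOVcc (pre-P6) the jmov* forms below emulate the
// conditional move with a short jump of the inverted condition around a
// plain MOV; on P6 and later the cmov* forms use the real CMOVcc encoding
// (0F 40+cc).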
6582 instruct jmovI_reg(cmpOp cop, eFlagsReg cr, rRegI dst, rRegI src) %{
6583   predicate(!VM_Version::supports_cmov() );
6584   match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
6585   ins_cost(200);
6586   format %{ "J$cop,us skip\t# signed cmove\n\t"
6587             "MOV    $dst,$src\n"
6588       "skip:" %}
6589   ins_encode %{
6590     Label Lskip;
6591     // Invert sense of branch from sense of CMOV
6592     __ jccb((Assembler::Condition)($cop$$cmpcode^1), Lskip);
6593     __ movl($dst$$Register, $src$$Register);
6594     __ bind(Lskip);
6595   %}
6596   ins_pipe( pipe_cmov_reg );
6597 %}
6598 
6599 instruct jmovI_regU(cmpOpU cop, eFlagsRegU cr, rRegI dst, rRegI src) %{
6600   predicate(!VM_Version::supports_cmov() );
6601   match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
6602   ins_cost(200);
6603   format %{ "J$cop,us skip\t# unsigned cmove\n\t"
6604             "MOV    $dst,$src\n"
6605       "skip:" %}
6606   ins_encode %{
6607     Label Lskip;
6608     // Invert sense of branch from sense of CMOV
6609     __ jccb((Assembler::Condition)($cop$$cmpcode^1), Lskip);
6610     __ movl($dst$$Register, $src$$Register);
6611     __ bind(Lskip);
6612   %}
6613   ins_pipe( pipe_cmov_reg );
6614 %}
6615 
6616 instruct cmovI_reg(rRegI dst, rRegI src, eFlagsReg cr, cmpOp cop ) %{
6617   predicate(VM_Version::supports_cmov() );
6618   match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
6619   ins_cost(200);
6620   format %{ "CMOV$cop $dst,$src" %}
6621   opcode(0x0F,0x40);
6622   ins_encode( enc_cmov(cop), RegReg( dst, src ) );
6623   ins_pipe( pipe_cmov_reg );
6624 %}
6625 
6626 instruct cmovI_regU( cmpOpU cop, eFlagsRegU cr, rRegI dst, rRegI src ) %{
6627   predicate(VM_Version::supports_cmov() );
6628   match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
6629   ins_cost(200);
6630   format %{ "CMOV$cop $dst,$src" %}
6631   opcode(0x0F,0x40);
6632   ins_encode( enc_cmov(cop), RegReg( dst, src ) );
6633   ins_pipe( pipe_cmov_reg );
6634 %}
6635 
6636 instruct cmovI_regUCF( cmpOpUCF cop, eFlagsRegUCF cr, rRegI dst, rRegI src ) %{
6637   predicate(VM_Version::supports_cmov() );
6638   match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
6639   ins_cost(200);
6640   expand %{
6641     cmovI_regU(cop, cr, dst, src);
6642   %}
6643 %}
6644 
6645 // Conditional move
6646 instruct cmovI_mem(cmpOp cop, eFlagsReg cr, rRegI dst, memory src) %{
6647   predicate(VM_Version::supports_cmov() );
6648   match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
6649   ins_cost(250);
6650   format %{ "CMOV$cop $dst,$src" %}
6651   opcode(0x0F,0x40);
6652   ins_encode( enc_cmov(cop), RegMem( dst, src ) );
6653   ins_pipe( pipe_cmov_mem );
6654 %}
6655 
6656 // Conditional move
6657 instruct cmovI_memU(cmpOpU cop, eFlagsRegU cr, rRegI dst, memory src) %{
6658   predicate(VM_Version::supports_cmov() );
6659   match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
6660   ins_cost(250);
6661   format %{ "CMOV$cop $dst,$src" %}
6662   opcode(0x0F,0x40);
6663   ins_encode( enc_cmov(cop), RegMem( dst, src ) );
6664   ins_pipe( pipe_cmov_mem );
6665 %}
6666 
6667 instruct cmovI_memUCF(cmpOpUCF cop, eFlagsRegUCF cr, rRegI dst, memory src) %{
6668   predicate(VM_Version::supports_cmov() );
6669   match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
6670   ins_cost(250);
6671   expand %{
6672     cmovI_memU(cop, cr, dst, src);
6673   %}
6674 %}
6675 
6676 // Conditional move
6677 instruct cmovP_reg(eRegP dst, eRegP src, eFlagsReg cr, cmpOp cop ) %{
6678   predicate(VM_Version::supports_cmov() );
6679   match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
6680   ins_cost(200);
6681   format %{ "CMOV$cop $dst,$src\t# ptr" %}
6682   opcode(0x0F,0x40);
6683   ins_encode( enc_cmov(cop), RegReg( dst, src ) );
6684   ins_pipe( pipe_cmov_reg );
6685 %}
6686 
6687 // Conditional move (non-P6 version)
6688 // Note:  a CMoveP is generated for  stubs and native wrappers
6689 //        regardless of whether we are on a P6, so we
6690 //        emulate a cmov here
6691 instruct cmovP_reg_nonP6(eRegP dst, eRegP src, eFlagsReg cr, cmpOp cop ) %{
6692   match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
6693   ins_cost(300);
6694   format %{ "Jn$cop   skip\n\t"
6695           "MOV    $dst,$src\t# pointer\n"
6696       "skip:" %}
6697   opcode(0x8b);
6698   ins_encode( enc_cmov_branch(cop, 0x2), OpcP, RegReg(dst, src));
6699   ins_pipe( pipe_cmov_reg );
6700 %}
6701 
6702 // Conditional move
6703 instruct cmovP_regU(cmpOpU cop, eFlagsRegU cr, eRegP dst, eRegP src ) %{
6704   predicate(VM_Version::supports_cmov() );
6705   match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
6706   ins_cost(200);
6707   format %{ "CMOV$cop $dst,$src\t# ptr" %}
6708   opcode(0x0F,0x40);
6709   ins_encode( enc_cmov(cop), RegReg( dst, src ) );
6710   ins_pipe( pipe_cmov_reg );
6711 %}
6712 
6713 instruct cmovP_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegP dst, eRegP src ) %{
6714   predicate(VM_Version::supports_cmov() );
6715   match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
6716   ins_cost(200);
6717   expand %{
6718     cmovP_regU(cop, cr, dst, src);
6719   %}
6720 %}
6721 
6722 // DISABLED: Requires the ADLC to emit a bottom_type call that
6723 // correctly meets the two pointer arguments; one is an incoming
6724 // register but the other is a memory operand.  ALSO appears to
6725 // be buggy with implicit null checks.
6726 //
6727 //// Conditional move
6728 //instruct cmovP_mem(cmpOp cop, eFlagsReg cr, eRegP dst, memory src) %{
6729 //  predicate(VM_Version::supports_cmov() );
6730 //  match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
6731 //  ins_cost(250);
6732 //  format %{ "CMOV$cop $dst,$src\t# ptr" %}
6733 //  opcode(0x0F,0x40);
6734 //  ins_encode( enc_cmov(cop), RegMem( dst, src ) );
6735 //  ins_pipe( pipe_cmov_mem );
6736 //%}
6737 //
6738 //// Conditional move
6739 //instruct cmovP_memU(cmpOpU cop, eFlagsRegU cr, eRegP dst, memory src) %{
6740 //  predicate(VM_Version::supports_cmov() );
6741 //  match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
6742 //  ins_cost(250);
6743 //  format %{ "CMOV$cop $dst,$src\t# ptr" %}
6744 //  opcode(0x0F,0x40);
6745 //  ins_encode( enc_cmov(cop), RegMem( dst, src ) );
6746 //  ins_pipe( pipe_cmov_mem );
6747 //%}
6748 
6749 // Conditional move
6750 instruct fcmovDPR_regU(cmpOp_fcmov cop, eFlagsRegU cr, regDPR1 dst, regDPR src) %{
6751   predicate(UseSSE<=1);
6752   match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
6753   ins_cost(200);
6754   format %{ "FCMOV$cop $dst,$src\t# double" %}
6755   opcode(0xDA);
6756   ins_encode( enc_cmov_dpr(cop,src) );
6757   ins_pipe( pipe_cmovDPR_reg );
6758 %}
6759 
6760 // Conditional move
6761 instruct fcmovFPR_regU(cmpOp_fcmov cop, eFlagsRegU cr, regFPR1 dst, regFPR src) %{
6762   predicate(UseSSE==0);
6763   match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
6764   ins_cost(200);
6765   format %{ "FCMOV$cop $dst,$src\t# float" %}
6766   opcode(0xDA);
6767   ins_encode( enc_cmov_dpr(cop,src) );
6768   ins_pipe( pipe_cmovDPR_reg );
6769 %}
6770 
6771 // Float CMOV on Intel doesn't handle *signed* compares, only unsigned.
6772 instruct fcmovDPR_regS(cmpOp cop, eFlagsReg cr, regDPR dst, regDPR src) %{
6773   predicate(UseSSE<=1);
6774   match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
6775   ins_cost(200);
6776   format %{ "Jn$cop   skip\n\t"
6777             "MOV    $dst,$src\t# double\n"
6778       "skip:" %}
6779   opcode (0xdd, 0x3);     /* DD D8+i or DD /3 */
6780   ins_encode( enc_cmov_branch( cop, 0x4 ), Push_Reg_DPR(src), OpcP, RegOpc(dst) );
6781   ins_pipe( pipe_cmovDPR_reg );
6782 %}
6783 
6784 // Float CMOV on Intel doesn't handle *signed* compares, only unsigned.
6785 instruct fcmovFPR_regS(cmpOp cop, eFlagsReg cr, regFPR dst, regFPR src) %{
6786   predicate(UseSSE==0);
6787   match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
6788   ins_cost(200);
6789   format %{ "Jn$cop    skip\n\t"
6790             "MOV    $dst,$src\t# float\n"
6791       "skip:" %}
6792   opcode (0xdd, 0x3);     /* DD D8+i or DD /3 */
6793   ins_encode( enc_cmov_branch( cop, 0x4 ), Push_Reg_FPR(src), OpcP, RegOpc(dst) );
6794   ins_pipe( pipe_cmovDPR_reg );
6795 %}
6796 
6797 // No CMOVE with SSE/SSE2
6798 instruct fcmovF_regS(cmpOp cop, eFlagsReg cr, regF dst, regF src) %{
6799   predicate (UseSSE>=1);
6800   match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
6801   ins_cost(200);
6802   format %{ "Jn$cop   skip\n\t"
6803             "MOVSS  $dst,$src\t# float\n"
6804       "skip:" %}
6805   ins_encode %{
6806     Label skip;
6807     // Invert sense of branch from sense of CMOV
6808     __ jccb((Assembler::Condition)($cop$$cmpcode^1), skip);
6809     __ movflt($dst$$XMMRegister, $src$$XMMRegister);
6810     __ bind(skip);
6811   %}
6812   ins_pipe( pipe_slow );
6813 %}
6814 
6815 // No CMOVE with SSE/SSE2
6816 instruct fcmovD_regS(cmpOp cop, eFlagsReg cr, regD dst, regD src) %{
6817   predicate (UseSSE>=2);
6818   match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
6819   ins_cost(200);
6820   format %{ "Jn$cop   skip\n\t"
6821             "MOVSD  $dst,$src\t# double\n"
6822       "skip:" %}
6823   ins_encode %{
6824     Label skip;
6825     // Invert sense of branch from sense of CMOV
6826     __ jccb((Assembler::Condition)($cop$$cmpcode^1), skip);
6827     __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
6828     __ bind(skip);
6829   %}
6830   ins_pipe( pipe_slow );
6831 %}
6832 
6833 // unsigned version
6834 instruct fcmovF_regU(cmpOpU cop, eFlagsRegU cr, regF dst, regF src) %{
6835   predicate (UseSSE>=1);
6836   match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
6837   ins_cost(200);
6838   format %{ "Jn$cop   skip\n\t"
6839             "MOVSS  $dst,$src\t# float\n"
6840       "skip:" %}
6841   ins_encode %{
6842     Label skip;
6843     // Invert sense of branch from sense of CMOV
6844     __ jccb((Assembler::Condition)($cop$$cmpcode^1), skip);
6845     __ movflt($dst$$XMMRegister, $src$$XMMRegister);
6846     __ bind(skip);
6847   %}
6848   ins_pipe( pipe_slow );
6849 %}
6850 
6851 instruct fcmovF_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, regF dst, regF src) %{
6852   predicate (UseSSE>=1);
6853   match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
6854   ins_cost(200);
6855   expand %{
6856     fcmovF_regU(cop, cr, dst, src);
6857   %}
6858 %}
6859 
6860 // unsigned version
6861 instruct fcmovD_regU(cmpOpU cop, eFlagsRegU cr, regD dst, regD src) %{
6862   predicate (UseSSE>=2);
6863   match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
6864   ins_cost(200);
6865   format %{ "Jn$cop   skip\n\t"
6866             "MOVSD  $dst,$src\t# double\n"
6867       "skip:" %}
6868   ins_encode %{
6869     Label skip;
6870     // Invert sense of branch from sense of CMOV
6871     __ jccb((Assembler::Condition)($cop$$cmpcode^1), skip);
6872     __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
6873     __ bind(skip);
6874   %}
6875   ins_pipe( pipe_slow );
6876 %}
6877 
6878 instruct fcmovD_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, regD dst, regD src) %{
6879   predicate (UseSSE>=2);
6880   match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
6881   ins_cost(200);
6882   expand %{
6883     fcmovD_regU(cop, cr, dst, src);
6884   %}
6885 %}
6886 
6887 instruct cmovL_reg(cmpOp cop, eFlagsReg cr, eRegL dst, eRegL src) %{
6888   predicate(VM_Version::supports_cmov() );
6889   match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
6890   ins_cost(200);
6891   format %{ "CMOV$cop $dst.lo,$src.lo\n\t"
6892             "CMOV$cop $dst.hi,$src.hi" %}
6893   opcode(0x0F,0x40);
6894   ins_encode( enc_cmov(cop), RegReg_Lo2( dst, src ), enc_cmov(cop), RegReg_Hi2( dst, src ) );
6895   ins_pipe( pipe_cmov_reg_long );
6896 %}
6897 
6898 instruct cmovL_regU(cmpOpU cop, eFlagsRegU cr, eRegL dst, eRegL src) %{
6899   predicate(VM_Version::supports_cmov() );
6900   match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
6901   ins_cost(200);
6902   format %{ "CMOV$cop $dst.lo,$src.lo\n\t"
6903             "CMOV$cop $dst.hi,$src.hi" %}
6904   opcode(0x0F,0x40);
6905   ins_encode( enc_cmov(cop), RegReg_Lo2( dst, src ), enc_cmov(cop), RegReg_Hi2( dst, src ) );
6906   ins_pipe( pipe_cmov_reg_long );
6907 %}
6908 
6909 instruct cmovL_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegL dst, eRegL src) %{
6910   predicate(VM_Version::supports_cmov() );
6911   match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
6912   ins_cost(200);
6913   expand %{
6914     cmovL_regU(cop, cr, dst, src);
6915   %}
6916 %}
6917 
6918 //----------Arithmetic Instructions--------------------------------------------
6919 //----------Addition Instructions----------------------------------------------
6920 
6921 // Integer Addition Instructions
6922 instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
6923   match(Set dst (AddI dst src));
6924   effect(KILL cr);
6925 
6926   size(2);
6927   format %{ "ADD    $dst,$src" %}
6928   opcode(0x03);
6929   ins_encode( OpcP, RegReg( dst, src) );
6930   ins_pipe( ialu_reg_reg );
6931 %}
6932 
6933 instruct addI_eReg_imm(rRegI dst, immI src, eFlagsReg cr) %{
6934   match(Set dst (AddI dst src));
6935   effect(KILL cr);
6936 
6937   format %{ "ADD    $dst,$src" %}
6938   opcode(0x81, 0x00); /* /0 id */
6939   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
6940   ins_pipe( ialu_reg );
6941 %}
6942 
6943 instruct incI_eReg(rRegI dst, immI1 src, eFlagsReg cr) %{
6944   predicate(UseIncDec);
6945   match(Set dst (AddI dst src));
6946   effect(KILL cr);
6947 
6948   size(1);
6949   format %{ "INC    $dst" %}
6950   opcode(0x40); /*  */
6951   ins_encode( Opc_plus( primary, dst ) );
6952   ins_pipe( ialu_reg );
6953 %}
6954 
6955 instruct leaI_eReg_immI(rRegI dst, rRegI src0, immI src1) %{
6956   match(Set dst (AddI src0 src1));
6957   ins_cost(110);
6958 
6959   format %{ "LEA    $dst,[$src0 + $src1]" %}
6960   opcode(0x8D); /* 0x8D /r */
6961   ins_encode( OpcP, RegLea( dst, src0, src1 ) );
6962   ins_pipe( ialu_reg_reg );
6963 %}
6964 
6965 instruct leaP_eReg_immI(eRegP dst, eRegP src0, immI src1) %{
6966   match(Set dst (AddP src0 src1));
6967   ins_cost(110);
6968 
6969   format %{ "LEA    $dst,[$src0 + $src1]\t# ptr" %}
6970   opcode(0x8D); /* 0x8D /r */
6971   ins_encode( OpcP, RegLea( dst, src0, src1 ) );
6972   ins_pipe( ialu_reg_reg );
6973 %}
6974 
6975 instruct decI_eReg(rRegI dst, immI_M1 src, eFlagsReg cr) %{
6976   predicate(UseIncDec);
6977   match(Set dst (AddI dst src));
6978   effect(KILL cr);
6979 
6980   size(1);
6981   format %{ "DEC    $dst" %}
6982   opcode(0x48); /*  */
6983   ins_encode( Opc_plus( primary, dst ) );
6984   ins_pipe( ialu_reg );
6985 %}
6986 
6987 instruct addP_eReg(eRegP dst, rRegI src, eFlagsReg cr) %{
6988   match(Set dst (AddP dst src));
6989   effect(KILL cr);
6990 
6991   size(2);
6992   format %{ "ADD    $dst,$src" %}
6993   opcode(0x03);
6994   ins_encode( OpcP, RegReg( dst, src) );
6995   ins_pipe( ialu_reg_reg );
6996 %}
6997 
6998 instruct addP_eReg_imm(eRegP dst, immI src, eFlagsReg cr) %{
6999   match(Set dst (AddP dst src));
7000   effect(KILL cr);
7001 
7002   format %{ "ADD    $dst,$src" %}
7003   opcode(0x81,0x00); /* Opcode 81 /0 id */
7004   // ins_encode( RegImm( dst, src) );
7005   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
7006   ins_pipe( ialu_reg );
7007 %}
7008 
7009 instruct addI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
7010   match(Set dst (AddI dst (LoadI src)));
7011   effect(KILL cr);
7012 
7013   ins_cost(125);
7014   format %{ "ADD    $dst,$src" %}
7015   opcode(0x03);
7016   ins_encode( OpcP, RegMem( dst, src) );
7017   ins_pipe( ialu_reg_mem );
7018 %}
7019 
7020 instruct addI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
7021   match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7022   effect(KILL cr);
7023 
7024   ins_cost(150);
7025   format %{ "ADD    $dst,$src" %}
7026   opcode(0x01);  /* Opcode 01 /r */
7027   ins_encode( OpcP, RegMem( src, dst ) );
7028   ins_pipe( ialu_mem_reg );
7029 %}
7030 
7031 // Add Memory with Immediate
7032 instruct addI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
7033   match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7034   effect(KILL cr);
7035 
7036   ins_cost(125);
7037   format %{ "ADD    $dst,$src" %}
7038   opcode(0x81);               /* Opcode 81 /0 id */
7039   ins_encode( OpcSE( src ), RMopc_Mem(0x00,dst), Con8or32( src ) );
7040   ins_pipe( ialu_mem_imm );
7041 %}
7042 
7043 instruct incI_mem(memory dst, immI1 src, eFlagsReg cr) %{
7044   match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7045   effect(KILL cr);
7046 
7047   ins_cost(125);
7048   format %{ "INC    $dst" %}
7049   opcode(0xFF);               /* Opcode FF /0 */
7050   ins_encode( OpcP, RMopc_Mem(0x00,dst));
7051   ins_pipe( ialu_mem_imm );
7052 %}
7053 
7054 instruct decI_mem(memory dst, immI_M1 src, eFlagsReg cr) %{
7055   match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7056   effect(KILL cr);
7057 
7058   ins_cost(125);
7059   format %{ "DEC    $dst" %}
7060   opcode(0xFF);               /* Opcode FF /1 */
7061   ins_encode( OpcP, RMopc_Mem(0x01,dst));
7062   ins_pipe( ialu_mem_imm );
7063 %}
7064 
7065 
7066 instruct checkCastPP( eRegP dst ) %{
7067   match(Set dst (CheckCastPP dst));
7068 
7069   size(0);
7070   format %{ "#checkcastPP of $dst" %}
7071   ins_encode( /*empty encoding*/ );
7072   ins_pipe( empty );
7073 %}
7074 
7075 instruct castPP( eRegP dst ) %{
7076   match(Set dst (CastPP dst));
7077   format %{ "#castPP of $dst" %}
7078   ins_encode( /*empty encoding*/ );
7079   ins_pipe( empty );
7080 %}
7081 
7082 instruct castII( rRegI dst ) %{
7083   match(Set dst (CastII dst));
7084   format %{ "#castII of $dst" %}
7085   ins_encode( /*empty encoding*/ );
7086   ins_cost(0);
7087   ins_pipe( empty );
7088 %}
7089 
7090 
7091 // Load-locked - same as a regular pointer load when used with compare-swap
7092 instruct loadPLocked(eRegP dst, memory mem) %{
7093   match(Set dst (LoadPLocked mem));
7094 
7095   ins_cost(125);
7096   format %{ "MOV    $dst,$mem\t# Load ptr. locked" %}
7097   opcode(0x8B);
7098   ins_encode( OpcP, RegMem(dst,mem));
7099   ins_pipe( ialu_reg_mem );
7100 %}
7101 
7102 // Conditional-store of the updated heap-top.
7103 // Used during allocation of the shared heap.
7104 // Sets flags (EQ) on success.  Implemented with a CMPXCHG on Intel.
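// LOCK CMPXCHG compares EAX with the heap-top word and, if they match,
// stores $newval and sets ZF; that flag is exactly the (EQ) result the
// allocation path tests afterwards.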
7105 instruct storePConditional( memory heap_top_ptr, eAXRegP oldval, eRegP newval, eFlagsReg cr ) %{
7106   match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
7107   // EAX is killed if there is contention, but then it's also unused.
7108   // In the common case of no contention, EAX holds the new oop address.
7109   format %{ "CMPXCHG $heap_top_ptr,$newval\t# If EAX==$heap_top_ptr Then store $newval into $heap_top_ptr" %}
7110   ins_encode( lock_prefix, Opcode(0x0F), Opcode(0xB1), RegMem(newval,heap_top_ptr) );
7111   ins_pipe( pipe_cmpxchg );
7112 %}
7113 
7114 // Conditional-store of an int value.
7115 // ZF flag is set on success, reset otherwise.  Implemented with a CMPXCHG on Intel.
7116 instruct storeIConditional( memory mem, eAXRegI oldval, rRegI newval, eFlagsReg cr ) %{
7117   match(Set cr (StoreIConditional mem (Binary oldval newval)));
7118   effect(KILL oldval);
7119   format %{ "CMPXCHG $mem,$newval\t# If EAX==$mem Then store $newval into $mem" %}
7120   ins_encode( lock_prefix, Opcode(0x0F), Opcode(0xB1), RegMem(newval, mem) );
7121   ins_pipe( pipe_cmpxchg );
7122 %}
7123 
7124 // Conditional-store of a long value.
7125 // ZF flag is set on success, reset otherwise.  Implemented with a CMPXCHG8 on Intel.
7126 instruct storeLConditional( memory mem, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
7127   match(Set cr (StoreLConditional mem (Binary oldval newval)));
7128   effect(KILL oldval);
7129   format %{ "XCHG   EBX,ECX\t# correct order for CMPXCHG8 instruction\n\t"
7130             "CMPXCHG8 $mem,ECX:EBX\t# If EDX:EAX==$mem Then store ECX:EBX into $mem\n\t"
7131             "XCHG   EBX,ECX"
7132   %}
7133   ins_encode %{
7134     // Note: we need to swap rbx and rcx before and after the
7135     //       cmpxchg8 instruction because the instruction uses
7136     //       rcx as the high order word of the new value to store but
7137     //       our register encoding uses rbx.
7138     __ xchgl(as_Register(EBX_enc), as_Register(ECX_enc));
7139     if( os::is_MP() )
7140       __ lock();
7141     __ cmpxchg8($mem$$Address);
7142     __ xchgl(as_Register(EBX_enc), as_Register(ECX_enc));
7143   %}
7144   ins_pipe( pipe_cmpxchg );
7145 %}
7146 
7147 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
7148 
7149 instruct compareAndSwapL( rRegI res, eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
7150   predicate(VM_Version::supports_cx8());
7151   match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
7152   effect(KILL cr, KILL oldval);
7153   format %{ "CMPXCHG8 [$mem_ptr],$newval\t# If EDX:EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
7154             "MOV    $res,0\n\t"
7155             "JNE,s  fail\n\t"
7156             "MOV    $res,1\n"
7157           "fail:" %}
7158   ins_encode( enc_cmpxchg8(mem_ptr),
7159               enc_flags_ne_to_boolean(res) );
7160   ins_pipe( pipe_cmpxchg );
7161 %}
7162 
7163 instruct compareAndSwapP( rRegI res,  pRegP mem_ptr, eAXRegP oldval, eCXRegP newval, eFlagsReg cr) %{
7164   match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
7165   effect(KILL cr, KILL oldval);
7166   format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
7167             "MOV    $res,0\n\t"
7168             "JNE,s  fail\n\t"
7169             "MOV    $res,1\n"
7170           "fail:" %}
7171   ins_encode( enc_cmpxchg(mem_ptr), enc_flags_ne_to_boolean(res) );
7172   ins_pipe( pipe_cmpxchg );
7173 %}
7174 
7175 instruct compareAndSwapI( rRegI res, pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr) %{
7176   match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
7177   effect(KILL cr, KILL oldval);
7178   format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
7179             "MOV    $res,0\n\t"
7180             "JNE,s  fail\n\t"
7181             "MOV    $res,1\n"
7182           "fail:" %}
7183   ins_encode( enc_cmpxchg(mem_ptr), enc_flags_ne_to_boolean(res) );
7184   ins_pipe( pipe_cmpxchg );
7185 %}
7186 
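// When the previous value of a GetAndAddI is not used, a LOCK-prefixed ADD
// is enough: it is just as atomic and just as much of a fence as XADD, but
// it does not have to return the old value in a register.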
7187 instruct xaddI_no_res( memory mem, Universe dummy, immI add, eFlagsReg cr) %{
7188   predicate(n->as_LoadStore()->result_not_used());
7189   match(Set dummy (GetAndAddI mem add));
7190   effect(KILL cr);
7191   format %{ "ADDL  [$mem],$add" %}
7192   ins_encode %{
7193     if (os::is_MP()) { __ lock(); }
7194     __ addl($mem$$Address, $add$$constant);
7195   %}
7196   ins_pipe( pipe_cmpxchg );
7197 %}
7198 
7199 instruct xaddI( memory mem, rRegI newval, eFlagsReg cr) %{
7200   match(Set newval (GetAndAddI mem newval));
7201   effect(KILL cr);
7202   format %{ "XADDL  [$mem],$newval" %}
7203   ins_encode %{
7204     if (os::is_MP()) { __ lock(); }
7205     __ xaddl($mem$$Address, $newval$$Register);
7206   %}
7207   ins_pipe( pipe_cmpxchg );
7208 %}
7209 
7210 instruct xchgI( memory mem, rRegI newval) %{
7211   match(Set newval (GetAndSetI mem newval));
7212   format %{ "XCHGL  $newval,[$mem]" %}
7213   ins_encode %{
7214     __ xchgl($newval$$Register, $mem$$Address);
7215   %}
7216   ins_pipe( pipe_cmpxchg );
7217 %}
7218 
7219 instruct xchgP( memory mem, pRegP newval) %{
7220   match(Set newval (GetAndSetP mem newval));
7221   format %{ "XCHGL  $newval,[$mem]" %}
7222   ins_encode %{
7223     __ xchgl($newval$$Register, $mem$$Address);
7224   %}
7225   ins_pipe( pipe_cmpxchg );
7226 %}
7227 
7228 //----------Subtraction Instructions-------------------------------------------
7229 
7230 // Integer Subtraction Instructions
7231 instruct subI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
7232   match(Set dst (SubI dst src));
7233   effect(KILL cr);
7234 
7235   size(2);
7236   format %{ "SUB    $dst,$src" %}
7237   opcode(0x2B);
7238   ins_encode( OpcP, RegReg( dst, src) );
7239   ins_pipe( ialu_reg_reg );
7240 %}
7241 
7242 instruct subI_eReg_imm(rRegI dst, immI src, eFlagsReg cr) %{
7243   match(Set dst (SubI dst src));
7244   effect(KILL cr);
7245 
7246   format %{ "SUB    $dst,$src" %}
7247   opcode(0x81,0x05);  /* Opcode 81 /5 */
7248   // ins_encode( RegImm( dst, src) );
7249   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
7250   ins_pipe( ialu_reg );
7251 %}
7252 
7253 instruct subI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
7254   match(Set dst (SubI dst (LoadI src)));
7255   effect(KILL cr);
7256 
7257   ins_cost(125);
7258   format %{ "SUB    $dst,$src" %}
7259   opcode(0x2B);
7260   ins_encode( OpcP, RegMem( dst, src) );
7261   ins_pipe( ialu_reg_mem );
7262 %}
7263 
7264 instruct subI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
7265   match(Set dst (StoreI dst (SubI (LoadI dst) src)));
7266   effect(KILL cr);
7267 
7268   ins_cost(150);
7269   format %{ "SUB    $dst,$src" %}
7270   opcode(0x29);  /* Opcode 29 /r */
7271   ins_encode( OpcP, RegMem( src, dst ) );
7272   ins_pipe( ialu_mem_reg );
7273 %}
7274 
7275 // Subtract from a pointer
7276 instruct subP_eReg(eRegP dst, rRegI src, immI0 zero, eFlagsReg cr) %{
7277   match(Set dst (AddP dst (SubI zero src)));
7278   effect(KILL cr);
7279 
7280   size(2);
7281   format %{ "SUB    $dst,$src" %}
7282   opcode(0x2B);
7283   ins_encode( OpcP, RegReg( dst, src) );
7284   ins_pipe( ialu_reg_reg );
7285 %}
7286 
7287 instruct negI_eReg(rRegI dst, immI0 zero, eFlagsReg cr) %{
7288   match(Set dst (SubI zero dst));
7289   effect(KILL cr);
7290 
7291   size(2);
7292   format %{ "NEG    $dst" %}
7293   opcode(0xF7,0x03);  // Opcode F7 /3
7294   ins_encode( OpcP, RegOpc( dst ) );
7295   ins_pipe( ialu_reg );
7296 %}
7297 
7298 //----------Multiplication/Division Instructions-------------------------------
7299 // Integer Multiplication Instructions
7300 // Multiply Register
7301 instruct mulI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
7302   match(Set dst (MulI dst src));
7303   effect(KILL cr);
7304 
7305   size(3);
7306   ins_cost(300);
7307   format %{ "IMUL   $dst,$src" %}
7308   opcode(0xAF, 0x0F);
7309   ins_encode( OpcS, OpcP, RegReg( dst, src) );
7310   ins_pipe( ialu_reg_reg_alu0 );
7311 %}
7312 
7313 // Multiply 32-bit Immediate
7314 instruct mulI_eReg_imm(rRegI dst, rRegI src, immI imm, eFlagsReg cr) %{
7315   match(Set dst (MulI src imm));
7316   effect(KILL cr);
7317 
7318   ins_cost(300);
7319   format %{ "IMUL   $dst,$src,$imm" %}
7320   opcode(0x69);  /* 69 /r id */
7321   ins_encode( OpcSE(imm), RegReg( dst, src ), Con8or32( imm ) );
7322   ins_pipe( ialu_reg_reg_alu0 );
7323 %}
7324 
7325 instruct loadConL_low_only(eADXRegL_low_only dst, immL32 src, eFlagsReg cr) %{
7326   match(Set dst src);
7327   effect(KILL cr);
7328 
7329   // Note that this is artificially increased to make it more expensive than loadConL
7330   ins_cost(250);
7331   format %{ "MOV    EAX,$src\t// low word only" %}
7332   opcode(0xB8);
7333   ins_encode( LdImmL_Lo(dst, src) );
7334   ins_pipe( ialu_reg_fat );
7335 %}
7336 
7337 // Multiply by 32-bit Immediate, taking the shifted high order results
7338 //  (special case for shift by 32)
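// The predicate below digs down to the ConL leaf of the matched subtree and
// checks that the long constant fits in a signed 32-bit value, so the
// multiply can be a single one-operand IMUL leaving the full 64-bit product
// in EDX:EAX, from which the shift by 32 simply selects the high word.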
7339 instruct mulI_imm_high(eDXRegI dst, nadxRegI src1, eADXRegL_low_only src2, immI_32 cnt, eFlagsReg cr) %{
7340   match(Set dst (ConvL2I (RShiftL (MulL (ConvI2L src1) src2) cnt)));
7341   predicate( _kids[0]->_kids[0]->_kids[1]->_leaf->Opcode() == Op_ConL &&
7342              _kids[0]->_kids[0]->_kids[1]->_leaf->as_Type()->type()->is_long()->get_con() >= min_jint &&
7343              _kids[0]->_kids[0]->_kids[1]->_leaf->as_Type()->type()->is_long()->get_con() <= max_jint );
7344   effect(USE src1, KILL cr);
7345 
7346   // Note that this is adjusted by 150 to compensate for the overcosting of loadConL_low_only
7347   ins_cost(0*100 + 1*400 - 150);
7348   format %{ "IMUL   EDX:EAX,$src1" %}
7349   ins_encode( multiply_con_and_shift_high( dst, src1, src2, cnt, cr ) );
7350   ins_pipe( pipe_slow );
7351 %}
7352 
7353 // Multiply by 32-bit Immediate, taking the shifted high order results
7354 instruct mulI_imm_RShift_high(eDXRegI dst, nadxRegI src1, eADXRegL_low_only src2, immI_32_63 cnt, eFlagsReg cr) %{
7355   match(Set dst (ConvL2I (RShiftL (MulL (ConvI2L src1) src2) cnt)));
7356   predicate( _kids[0]->_kids[0]->_kids[1]->_leaf->Opcode() == Op_ConL &&
7357              _kids[0]->_kids[0]->_kids[1]->_leaf->as_Type()->type()->is_long()->get_con() >= min_jint &&
7358              _kids[0]->_kids[0]->_kids[1]->_leaf->as_Type()->type()->is_long()->get_con() <= max_jint );
7359   effect(USE src1, KILL cr);
7360 
7361   // Note that this is adjusted by 150 to compensate for the overcosting of loadConL_low_only
7362   ins_cost(1*100 + 1*400 - 150);
7363   format %{ "IMUL   EDX:EAX,$src1\n\t"
7364             "SAR    EDX,$cnt-32" %}
7365   ins_encode( multiply_con_and_shift_high( dst, src1, src2, cnt, cr ) );
7366   ins_pipe( pipe_slow );
7367 %}
7368 
7369 // Multiply Memory 32-bit Immediate
7370 instruct mulI_mem_imm(rRegI dst, memory src, immI imm, eFlagsReg cr) %{
7371   match(Set dst (MulI (LoadI src) imm));
7372   effect(KILL cr);
7373 
7374   ins_cost(300);
7375   format %{ "IMUL   $dst,$src,$imm" %}
7376   opcode(0x69);  /* 69 /r id */
7377   ins_encode( OpcSE(imm), RegMem( dst, src ), Con8or32( imm ) );
7378   ins_pipe( ialu_reg_mem_alu0 );
7379 %}
7380 
7381 // Multiply Memory
7382 instruct mulI(rRegI dst, memory src, eFlagsReg cr) %{
7383   match(Set dst (MulI dst (LoadI src)));
7384   effect(KILL cr);
7385 
7386   ins_cost(350);
7387   format %{ "IMUL   $dst,$src" %}
7388   opcode(0xAF, 0x0F);
7389   ins_encode( OpcS, OpcP, RegMem( dst, src) );
7390   ins_pipe( ialu_reg_mem_alu0 );
7391 %}
7392 
7393 // Multiply Register Int to Long
7394 instruct mulI2L(eADXRegL dst, eAXRegI src, nadxRegI src1, eFlagsReg flags) %{
7395   // Basic Idea: long = (long)int * (long)int
7396   match(Set dst (MulL (ConvI2L src) (ConvI2L src1)));
7397   effect(DEF dst, USE src, USE src1, KILL flags);
7398 
7399   ins_cost(300);
7400   format %{ "IMUL   $dst,$src1" %}
7401 
7402   ins_encode( long_int_multiply( dst, src1 ) );
7403   ins_pipe( ialu_reg_reg_alu0 );
7404 %}
7405 
7406 instruct mulIS_eReg(eADXRegL dst, immL_32bits mask, eFlagsReg flags, eAXRegI src, nadxRegI src1) %{
7407   // Basic Idea:  long = (int & 0xffffffffL) * (int & 0xffffffffL)
7408   match(Set dst (MulL (AndL (ConvI2L src) mask) (AndL (ConvI2L src1) mask)));
7409   effect(KILL flags);
7410 
7411   ins_cost(300);
7412   format %{ "MUL    $dst,$src1" %}
7413 
7414   ins_encode( long_uint_multiply(dst, src1) );
7415   ins_pipe( ialu_reg_reg_alu0 );
7416 %}
7417 
7418 // Multiply Register Long
7419 instruct mulL_eReg(eADXRegL dst, eRegL src, rRegI tmp, eFlagsReg cr) %{
7420   match(Set dst (MulL dst src));
7421   effect(KILL cr, TEMP tmp);
7422   ins_cost(4*100+3*400);
7423 // Basic idea: lo(result) = lo(x_lo * y_lo)
7424 //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
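// Concretely, with x = x_hi*2^32 + x_lo and y = y_hi*2^32 + y_lo:
//   x*y mod 2^64 = x_lo*y_lo + 2^32*(x_hi*y_lo + x_lo*y_hi)
// (the x_hi*y_hi term lands above bit 63 and drops out), so one unsigned MUL
// produces x_lo*y_lo in EDX:EAX and the two IMULs supply the cross terms
// that are added into EDX.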
7425   format %{ "MOV    $tmp,$src.lo\n\t"
7426             "IMUL   $tmp,EDX\n\t"
7427             "MOV    EDX,$src.hi\n\t"
7428             "IMUL   EDX,EAX\n\t"
7429             "ADD    $tmp,EDX\n\t"
7430             "MUL    EDX:EAX,$src.lo\n\t"
7431             "ADD    EDX,$tmp" %}
7432   ins_encode( long_multiply( dst, src, tmp ) );
7433   ins_pipe( pipe_slow );
7434 %}
7435 
7436 // Multiply Register Long where the left operand's high 32 bits are zero
7437 instruct mulL_eReg_lhi0(eADXRegL dst, eRegL src, rRegI tmp, eFlagsReg cr) %{
7438   predicate(is_operand_hi32_zero(n->in(1)));
7439   match(Set dst (MulL dst src));
7440   effect(KILL cr, TEMP tmp);
7441   ins_cost(2*100+2*400);
7442 // Basic idea: lo(result) = lo(x_lo * y_lo)
7443 //             hi(result) = hi(x_lo * y_lo) + lo(x_lo * y_hi) where lo(x_hi * y_lo) = 0 because x_hi = 0
7444   format %{ "MOV    $tmp,$src.hi\n\t"
7445             "IMUL   $tmp,EAX\n\t"
7446             "MUL    EDX:EAX,$src.lo\n\t"
7447             "ADD    EDX,$tmp" %}
7448   ins_encode %{
7449     __ movl($tmp$$Register, HIGH_FROM_LOW($src$$Register));
7450     __ imull($tmp$$Register, rax);
7451     __ mull($src$$Register);
7452     __ addl(rdx, $tmp$$Register);
7453   %}
7454   ins_pipe( pipe_slow );
7455 %}
7456 
7457 // Multiply Register Long where the right operand's high 32 bits are zero
7458 instruct mulL_eReg_rhi0(eADXRegL dst, eRegL src, rRegI tmp, eFlagsReg cr) %{
7459   predicate(is_operand_hi32_zero(n->in(2)));
7460   match(Set dst (MulL dst src));
7461   effect(KILL cr, TEMP tmp);
7462   ins_cost(2*100+2*400);
7463 // Basic idea: lo(result) = lo(x_lo * y_lo)
7464 //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) where lo(x_lo * y_hi) = 0 because y_hi = 0
7465   format %{ "MOV    $tmp,$src.lo\n\t"
7466             "IMUL   $tmp,EDX\n\t"
7467             "MUL    EDX:EAX,$src.lo\n\t"
7468             "ADD    EDX,$tmp" %}
7469   ins_encode %{
7470     __ movl($tmp$$Register, $src$$Register);
7471     __ imull($tmp$$Register, rdx);
7472     __ mull($src$$Register);
7473     __ addl(rdx, $tmp$$Register);
7474   %}
7475   ins_pipe( pipe_slow );
7476 %}
7477 
7478 // Multiply Register Long where the left and the right operands' high 32 bits are zero
7479 instruct mulL_eReg_hi0(eADXRegL dst, eRegL src, eFlagsReg cr) %{
7480   predicate(is_operand_hi32_zero(n->in(1)) && is_operand_hi32_zero(n->in(2)));
7481   match(Set dst (MulL dst src));
7482   effect(KILL cr);
7483   ins_cost(1*400);
7484 // Basic idea: lo(result) = lo(x_lo * y_lo)
7485 //             hi(result) = hi(x_lo * y_lo) where lo(x_hi * y_lo) = 0 and lo(x_lo * y_hi) = 0 because x_hi = 0 and y_hi = 0
7486   format %{ "MUL    EDX:EAX,$src.lo\n\t" %}
7487   ins_encode %{
7488     __ mull($src$$Register);
7489   %}
7490   ins_pipe( pipe_slow );
7491 %}
7492 
7493 // Multiply Register Long by small constant
7494 instruct mulL_eReg_con(eADXRegL dst, immL_127 src, rRegI tmp, eFlagsReg cr) %{
7495   match(Set dst (MulL dst src));
7496   effect(KILL cr, TEMP tmp);
7497   ins_cost(2*100+2*400);
7498   size(12);
7499 // Basic idea: lo(result) = lo(src * EAX)
7500 //             hi(result) = hi(src * EAX) + lo(src * EDX)
7501   format %{ "IMUL   $tmp,EDX,$src\n\t"
7502             "MOV    EDX,$src\n\t"
7503             "MUL    EDX\t# EDX*EAX -> EDX:EAX\n\t"
7504             "ADD    EDX,$tmp" %}
7505   ins_encode( long_multiply_con( dst, src, tmp ) );
7506   ins_pipe( pipe_slow );
7507 %}
7508 
7509 // Integer DIV with Register
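// IDIV raises a hardware fault (#DE) when the quotient overflows, which
// happens for min_jint / -1.  The sequence below tests for exactly that pair
// and skips the IDIV in that case, leaving EAX = min_jint and EDX = 0, as
// Java semantics require.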
7510 instruct divI_eReg(eAXRegI rax, eDXRegI rdx, eCXRegI div, eFlagsReg cr) %{
7511   match(Set rax (DivI rax div));
7512   effect(KILL rdx, KILL cr);
7513   size(26);
7514   ins_cost(30*100+10*100);
7515   format %{ "CMP    EAX,0x80000000\n\t"
7516             "JNE,s  normal\n\t"
7517             "XOR    EDX,EDX\n\t"
7518             "CMP    ECX,-1\n\t"
7519             "JE,s   done\n"
7520     "normal: CDQ\n\t"
7521             "IDIV   $div\n\t"
7522     "done:"        %}
7523   opcode(0xF7, 0x7);  /* Opcode F7 /7 */
7524   ins_encode( cdq_enc, OpcP, RegOpc(div) );
7525   ins_pipe( ialu_reg_reg_alu0 );
7526 %}
7527 
7528 // Divide Register Long
7529 instruct divL_eReg( eADXRegL dst, eRegL src1, eRegL src2, eFlagsReg cr, eCXRegI cx, eBXRegI bx ) %{
7530   match(Set dst (DivL src1 src2));
7531   effect( KILL cr, KILL cx, KILL bx );
7532   ins_cost(10000);
7533   format %{ "PUSH   $src1.hi\n\t"
7534             "PUSH   $src1.lo\n\t"
7535             "PUSH   $src2.hi\n\t"
7536             "PUSH   $src2.lo\n\t"
7537             "CALL   SharedRuntime::ldiv\n\t"
7538             "ADD    ESP,16" %}
7539   ins_encode( long_div(src1,src2) );
7540   ins_pipe( pipe_slow );
7541 %}
7542 
7543 // Integer DIVMOD with Register, both quotient and mod results
7544 instruct divModI_eReg_divmod(eAXRegI rax, eDXRegI rdx, eCXRegI div, eFlagsReg cr) %{
7545   match(DivModI rax div);
7546   effect(KILL cr);
7547   size(26);
7548   ins_cost(30*100+10*100);
7549   format %{ "CMP    EAX,0x80000000\n\t"
7550             "JNE,s  normal\n\t"
7551             "XOR    EDX,EDX\n\t"
7552             "CMP    ECX,-1\n\t"
7553             "JE,s   done\n"
7554     "normal: CDQ\n\t"
7555             "IDIV   $div\n\t"
7556     "done:"        %}
7557   opcode(0xF7, 0x7);  /* Opcode F7 /7 */
7558   ins_encode( cdq_enc, OpcP, RegOpc(div) );
7559   ins_pipe( pipe_slow );
7560 %}
7561 
7562 // Integer MOD with Register
7563 instruct modI_eReg(eDXRegI rdx, eAXRegI rax, eCXRegI div, eFlagsReg cr) %{
7564   match(Set rdx (ModI rax div));
7565   effect(KILL rax, KILL cr);
7566 
7567   size(26);
7568   ins_cost(300);
7569   format %{ "CDQ\n\t"
7570             "IDIV   $div" %}
7571   opcode(0xF7, 0x7);  /* Opcode F7 /7 */
7572   ins_encode( cdq_enc, OpcP, RegOpc(div) );
7573   ins_pipe( ialu_reg_reg_alu0 );
7574 %}
7575 
7576 // Remainder Register Long
7577 instruct modL_eReg( eADXRegL dst, eRegL src1, eRegL src2, eFlagsReg cr, eCXRegI cx, eBXRegI bx ) %{
7578   match(Set dst (ModL src1 src2));
7579   effect( KILL cr, KILL cx, KILL bx );
7580   ins_cost(10000);
7581   format %{ "PUSH   $src1.hi\n\t"
7582             "PUSH   $src1.lo\n\t"
7583             "PUSH   $src2.hi\n\t"
7584             "PUSH   $src2.lo\n\t"
7585             "CALL   SharedRuntime::lrem\n\t"
7586             "ADD    ESP,16" %}
7587   ins_encode( long_mod(src1,src2) );
7588   ins_pipe( pipe_slow );
7589 %}
7590 
7591 // Divide Register Long (no special case since divisor != -1)
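// With a constant divisor the 64-by-32 division can be done with at most two
// unsigned 32-bit DIVs, schoolbook style in base 2^32: for a non-negative
// dividend hi:lo and a positive divisor d,
//   (hi:lo) / d  =  (hi / d) * 2^32  +  ((hi mod d) : lo) / d
// and the second quotient always fits in 32 bits because (hi mod d) < d.
// Negative operands are handled by dividing magnitudes and fixing the sign
// of the result at the end.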
7592 instruct divL_eReg_imm32( eADXRegL dst, immL32 imm, rRegI tmp, rRegI tmp2, eFlagsReg cr ) %{
7593   match(Set dst (DivL dst imm));
7594   effect( TEMP tmp, TEMP tmp2, KILL cr );
7595   ins_cost(1000);
7596   format %{ "MOV    $tmp,abs($imm) # ldiv EDX:EAX,$imm\n\t"
7597             "XOR    $tmp2,$tmp2\n\t"
7598             "CMP    $tmp,EDX\n\t"
7599             "JA,s   fast\n\t"
7600             "MOV    $tmp2,EAX\n\t"
7601             "MOV    EAX,EDX\n\t"
7602             "MOV    EDX,0\n\t"
7603             "JLE,s  pos\n\t"
7604             "LNEG   EAX : $tmp2\n\t"
7605             "DIV    $tmp # unsigned division\n\t"
7606             "XCHG   EAX,$tmp2\n\t"
7607             "DIV    $tmp\n\t"
7608             "LNEG   $tmp2 : EAX\n\t"
7609             "JMP,s  done\n"
7610     "pos:\n\t"
7611             "DIV    $tmp\n\t"
7612             "XCHG   EAX,$tmp2\n"
7613     "fast:\n\t"
7614             "DIV    $tmp\n"
7615     "done:\n\t"
7616             "MOV    EDX,$tmp2\n\t"
7617             "NEG    EDX:EAX # if $imm < 0" %}
7618   ins_encode %{
7619     int con = (int)$imm$$constant;
7620     assert(con != 0 && con != -1 && con != min_jint, "wrong divisor");
7621     int pcon = (con > 0) ? con : -con;
7622     Label Lfast, Lpos, Ldone;
7623 
7624     __ movl($tmp$$Register, pcon);
7625     __ xorl($tmp2$$Register,$tmp2$$Register);
7626     __ cmpl($tmp$$Register, HIGH_FROM_LOW($dst$$Register));
7627     __ jccb(Assembler::above, Lfast); // result fits into 32 bit
7628 
7629     __ movl($tmp2$$Register, $dst$$Register); // save
7630     __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
7631     __ movl(HIGH_FROM_LOW($dst$$Register),0); // preserve flags
7632     __ jccb(Assembler::lessEqual, Lpos); // result is positive
7633 
7634     // Negative dividend.
7635     // convert value to positive to use unsigned division
7636     __ lneg($dst$$Register, $tmp2$$Register);
7637     __ divl($tmp$$Register);
7638     __ xchgl($dst$$Register, $tmp2$$Register);
7639     __ divl($tmp$$Register);
7640     // revert result back to negative
7641     __ lneg($tmp2$$Register, $dst$$Register);
7642     __ jmpb(Ldone);
7643 
7644     __ bind(Lpos);
7645     __ divl($tmp$$Register); // Use unsigned division
7646     __ xchgl($dst$$Register, $tmp2$$Register);
7647     // Fall through to the final divide; tmp2 has the 32-bit high result
7648 
7649     __ bind(Lfast);
7650     // fast path: src is positive
7651     __ divl($tmp$$Register); // Use unsigned division
7652 
7653     __ bind(Ldone);
7654     __ movl(HIGH_FROM_LOW($dst$$Register),$tmp2$$Register);
7655     if (con < 0) {
7656       __ lneg(HIGH_FROM_LOW($dst$$Register), $dst$$Register);
7657     }
7658   %}
7659   ins_pipe( pipe_slow );
7660 %}
7661 
7662 // Remainder Register Long (remainder fits into 32 bits)
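// Same two-DIV scheme as divL_eReg_imm32 above, using the identity
//   (hi:lo) mod d  =  ((hi mod d) : lo) mod d
// for a non-negative dividend; the remainder fits in 32 bits and, per Java
// semantics, takes the sign of the dividend.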
7663 instruct modL_eReg_imm32( eADXRegL dst, immL32 imm, rRegI tmp, rRegI tmp2, eFlagsReg cr ) %{
7664   match(Set dst (ModL dst imm));
7665   effect( TEMP tmp, TEMP tmp2, KILL cr );
7666   ins_cost(1000);
7667   format %{ "MOV    $tmp,abs($imm) # lrem EDX:EAX,$imm\n\t"
7668             "CMP    $tmp,EDX\n\t"
7669             "JA,s   fast\n\t"
7670             "MOV    $tmp2,EAX\n\t"
7671             "MOV    EAX,EDX\n\t"
7672             "MOV    EDX,0\n\t"
7673             "JLE,s  pos\n\t"
7674             "LNEG   EAX : $tmp2\n\t"
7675             "DIV    $tmp # unsigned division\n\t"
7676             "MOV    EAX,$tmp2\n\t"
7677             "DIV    $tmp\n\t"
7678             "NEG    EDX\n\t"
7679             "JMP,s  done\n"
7680     "pos:\n\t"
7681             "DIV    $tmp\n\t"
7682             "MOV    EAX,$tmp2\n"
7683     "fast:\n\t"
7684             "DIV    $tmp\n"
7685     "done:\n\t"
7686             "MOV    EAX,EDX\n\t"
7687             "SAR    EDX,31\n\t" %}
7688   ins_encode %{
7689     int con = (int)$imm$$constant;
7690     assert(con != 0 && con != -1 && con != min_jint, "wrong divisor");
7691     int pcon = (con > 0) ? con : -con;
7692     Label  Lfast, Lpos, Ldone;
7693 
7694     __ movl($tmp$$Register, pcon);
7695     __ cmpl($tmp$$Register, HIGH_FROM_LOW($dst$$Register));
7696     __ jccb(Assembler::above, Lfast); // src is positive and result fits into 32 bit
7697 
7698     __ movl($tmp2$$Register, $dst$$Register); // save
7699     __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
7700     __ movl(HIGH_FROM_LOW($dst$$Register),0); // preserve flags
7701     __ jccb(Assembler::lessEqual, Lpos); // result is positive
7702 
7703     // Negative dividend.
7704     // convert value to positive to use unsigned division
7705     __ lneg($dst$$Register, $tmp2$$Register);
7706     __ divl($tmp$$Register);
7707     __ movl($dst$$Register, $tmp2$$Register);
7708     __ divl($tmp$$Register);
7709     // revert remainder back to negative
7710     __ negl(HIGH_FROM_LOW($dst$$Register));
7711     __ jmpb(Ldone);
7712 
7713     __ bind(Lpos);
7714     __ divl($tmp$$Register);
7715     __ movl($dst$$Register, $tmp2$$Register);
7716 
7717     __ bind(Lfast);
7718     // fast path: src is positive
7719     __ divl($tmp$$Register);
7720 
7721     __ bind(Ldone);
7722     __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
7723     __ sarl(HIGH_FROM_LOW($dst$$Register), 31); // result sign
7724 
7725   %}
7726   ins_pipe( pipe_slow );
7727 %}
7728 
7729 // Integer Shift Instructions
7730 // Shift Left by one
7731 instruct shlI_eReg_1(rRegI dst, immI1 shift, eFlagsReg cr) %{
7732   match(Set dst (LShiftI dst shift));
7733   effect(KILL cr);
7734 
7735   size(2);
7736   format %{ "SHL    $dst,$shift" %}
7737   opcode(0xD1, 0x4);  /* D1 /4 */
7738   ins_encode( OpcP, RegOpc( dst ) );
7739   ins_pipe( ialu_reg );
7740 %}
7741 
7742 // Shift Left by 8-bit immediate
7743 instruct salI_eReg_imm(rRegI dst, immI8 shift, eFlagsReg cr) %{
7744   match(Set dst (LShiftI dst shift));
7745   effect(KILL cr);
7746 
7747   size(3);
7748   format %{ "SHL    $dst,$shift" %}
7749   opcode(0xC1, 0x4);  /* C1 /4 ib */
7750   ins_encode( RegOpcImm( dst, shift) );
7751   ins_pipe( ialu_reg );
7752 %}
7753 
7754 // Shift Left by variable
7755 instruct salI_eReg_CL(rRegI dst, eCXRegI shift, eFlagsReg cr) %{
7756   match(Set dst (LShiftI dst shift));
7757   effect(KILL cr);
7758 
7759   size(2);
7760   format %{ "SHL    $dst,$shift" %}
7761   opcode(0xD3, 0x4);  /* D3 /4 */
7762   ins_encode( OpcP, RegOpc( dst ) );
7763   ins_pipe( ialu_reg_reg );
7764 %}
7765 
7766 // Arithmetic shift right by one
7767 instruct sarI_eReg_1(rRegI dst, immI1 shift, eFlagsReg cr) %{
7768   match(Set dst (RShiftI dst shift));
7769   effect(KILL cr);
7770 
7771   size(2);
7772   format %{ "SAR    $dst,$shift" %}
7773   opcode(0xD1, 0x7);  /* D1 /7 */
7774   ins_encode( OpcP, RegOpc( dst ) );
7775   ins_pipe( ialu_reg );
7776 %}
7777 
7778 // Arithmetic shift right by one
7779 instruct sarI_mem_1(memory dst, immI1 shift, eFlagsReg cr) %{
7780   match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
7781   effect(KILL cr);
7782   format %{ "SAR    $dst,$shift" %}
7783   opcode(0xD1, 0x7);  /* D1 /7 */
7784   ins_encode( OpcP, RMopc_Mem(secondary,dst) );
7785   ins_pipe( ialu_mem_imm );
7786 %}
7787 
7788 // Arithmetic Shift Right by 8-bit immediate
7789 instruct sarI_eReg_imm(rRegI dst, immI8 shift, eFlagsReg cr) %{
7790   match(Set dst (RShiftI dst shift));
7791   effect(KILL cr);
7792 
7793   size(3);
7794   format %{ "SAR    $dst,$shift" %}
7795   opcode(0xC1, 0x7);  /* C1 /7 ib */
7796   ins_encode( RegOpcImm( dst, shift ) );
7797   ins_pipe( ialu_mem_imm );
7798 %}
7799 
7800 // Arithmetic Shift Right by 8-bit immediate
7801 instruct sarI_mem_imm(memory dst, immI8 shift, eFlagsReg cr) %{
7802   match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
7803   effect(KILL cr);
7804 
7805   format %{ "SAR    $dst,$shift" %}
7806   opcode(0xC1, 0x7);  /* C1 /7 ib */
7807   ins_encode( OpcP, RMopc_Mem(secondary, dst ), Con8or32( shift ) );
7808   ins_pipe( ialu_mem_imm );
7809 %}
7810 
7811 // Arithmetic Shift Right by variable
7812 instruct sarI_eReg_CL(rRegI dst, eCXRegI shift, eFlagsReg cr) %{
7813   match(Set dst (RShiftI dst shift));
7814   effect(KILL cr);
7815 
7816   size(2);
7817   format %{ "SAR    $dst,$shift" %}
7818   opcode(0xD3, 0x7);  /* D3 /7 */
7819   ins_encode( OpcP, RegOpc( dst ) );
7820   ins_pipe( ialu_reg_reg );
7821 %}
7822 
7823 // Logical shift right by one
7824 instruct shrI_eReg_1(rRegI dst, immI1 shift, eFlagsReg cr) %{
7825   match(Set dst (URShiftI dst shift));
7826   effect(KILL cr);
7827 
7828   size(2);
7829   format %{ "SHR    $dst,$shift" %}
7830   opcode(0xD1, 0x5);  /* D1 /5 */
7831   ins_encode( OpcP, RegOpc( dst ) );
7832   ins_pipe( ialu_reg );
7833 %}
7834 
7835 // Logical Shift Right by 8-bit immediate
7836 instruct shrI_eReg_imm(rRegI dst, immI8 shift, eFlagsReg cr) %{
7837   match(Set dst (URShiftI dst shift));
7838   effect(KILL cr);
7839 
7840   size(3);
7841   format %{ "SHR    $dst,$shift" %}
7842   opcode(0xC1, 0x5);  /* C1 /5 ib */
7843   ins_encode( RegOpcImm( dst, shift) );
7844   ins_pipe( ialu_reg );
7845 %}
7846 
7847 
7848 // Shift Left by 24, followed by Arithmetic Shift Right by 24, which sign-extends the low byte.
7849 // This idiom is used by the compiler for the i2b bytecode.
7850 instruct i2b(rRegI dst, xRegI src, immI_24 twentyfour) %{
7851   match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
7852 
7853   size(3);
7854   format %{ "MOVSX  $dst,$src :8" %}
7855   ins_encode %{
7856     __ movsbl($dst$$Register, $src$$Register);
7857   %}
7858   ins_pipe(ialu_reg_reg);
7859 %}
7860 
7861 // Shift Left by 16, followed by Arithmetic Shift Right by 16.
7862 // This idiom is used by the compiler for the i2s bytecode.
7863 instruct i2s(rRegI dst, xRegI src, immI_16 sixteen) %{
7864   match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
7865 
7866   size(3);
7867   format %{ "MOVSX  $dst,$src :16" %}
7868   ins_encode %{
7869     __ movswl($dst$$Register, $src$$Register);
7870   %}
7871   ins_pipe(ialu_reg_reg);
7872 %}
7873 
7874 
7875 // Logical Shift Right by variable
7876 instruct shrI_eReg_CL(rRegI dst, eCXRegI shift, eFlagsReg cr) %{
7877   match(Set dst (URShiftI dst shift));
7878   effect(KILL cr);
7879 
7880   size(2);
7881   format %{ "SHR    $dst,$shift" %}
7882   opcode(0xD3, 0x5);  /* D3 /5 */
7883   ins_encode( OpcP, RegOpc( dst ) );
7884   ins_pipe( ialu_reg_reg );
7885 %}
7886 
7887 
7888 //----------Logical Instructions-----------------------------------------------
7889 //----------Integer Logical Instructions---------------------------------------
7890 // And Instructions
7891 // And Register with Register
7892 instruct andI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
7893   match(Set dst (AndI dst src));
7894   effect(KILL cr);
7895 
7896   size(2);
7897   format %{ "AND    $dst,$src" %}
7898   opcode(0x23);
7899   ins_encode( OpcP, RegReg( dst, src) );
7900   ins_pipe( ialu_reg_reg );
7901 %}
7902 
7903 // And Register with Immediate
7904 instruct andI_eReg_imm(rRegI dst, immI src, eFlagsReg cr) %{
7905   match(Set dst (AndI dst src));
7906   effect(KILL cr);
7907 
7908   format %{ "AND    $dst,$src" %}
7909   opcode(0x81,0x04);  /* Opcode 81 /4 */
7910   // ins_encode( RegImm( dst, src) );
7911   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
7912   ins_pipe( ialu_reg );
7913 %}
7914 
7915 // And Register with Memory
7916 instruct andI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
7917   match(Set dst (AndI dst (LoadI src)));
7918   effect(KILL cr);
7919 
7920   ins_cost(125);
7921   format %{ "AND    $dst,$src" %}
7922   opcode(0x23);
7923   ins_encode( OpcP, RegMem( dst, src) );
7924   ins_pipe( ialu_reg_mem );
7925 %}
7926 
7927 // And Memory with Register
7928 instruct andI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
7929   match(Set dst (StoreI dst (AndI (LoadI dst) src)));
7930   effect(KILL cr);
7931 
7932   ins_cost(150);
7933   format %{ "AND    $dst,$src" %}
7934   opcode(0x21);  /* Opcode 21 /r */
7935   ins_encode( OpcP, RegMem( src, dst ) );
7936   ins_pipe( ialu_mem_reg );
7937 %}
7938 
7939 // And Memory with Immediate
7940 instruct andI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
7941   match(Set dst (StoreI dst (AndI (LoadI dst) src)));
7942   effect(KILL cr);
7943 
7944   ins_cost(125);
7945   format %{ "AND    $dst,$src" %}
7946   opcode(0x81, 0x4);  /* Opcode 81 /4 id */
7947   // ins_encode( MemImm( dst, src) );
7948   ins_encode( OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32( src ) );
7949   ins_pipe( ialu_mem_imm );
7950 %}
7951 
7952 // BMI1 instructions
7953 instruct andnI_rReg_rReg_rReg(rRegI dst, rRegI src1, rRegI src2, immI_M1 minus_1, eFlagsReg cr) %{
7954   match(Set dst (AndI (XorI src1 minus_1) src2));
7955   predicate(UseBMI1Instructions);
7956   effect(KILL cr);
7957 
7958   format %{ "ANDNL  $dst, $src1, $src2" %}
7959 
7960   ins_encode %{
7961     __ andnl($dst$$Register, $src1$$Register, $src2$$Register);
7962   %}
7963   ins_pipe(ialu_reg);
7964 %}
7965 
7966 instruct andnI_rReg_rReg_mem(rRegI dst, rRegI src1, memory src2, immI_M1 minus_1, eFlagsReg cr) %{
7967   match(Set dst (AndI (XorI src1 minus_1) (LoadI src2) ));
7968   predicate(UseBMI1Instructions);
7969   effect(KILL cr);
7970 
7971   ins_cost(125);
7972   format %{ "ANDNL  $dst, $src1, $src2" %}
7973 
7974   ins_encode %{
7975     __ andnl($dst$$Register, $src1$$Register, $src2$$Address);
7976   %}
7977   ins_pipe(ialu_reg_mem);
7978 %}
7979 
7980 instruct blsiI_rReg_rReg(rRegI dst, rRegI src, immI0 imm_zero, eFlagsReg cr) %{
7981   match(Set dst (AndI (SubI imm_zero src) src));
7982   predicate(UseBMI1Instructions);
7983   effect(KILL cr);
7984 
7985   format %{ "BLSIL  $dst, $src" %}
7986 
7987   ins_encode %{
7988     __ blsil($dst$$Register, $src$$Register);
7989   %}
7990   ins_pipe(ialu_reg);
7991 %}
7992 
7993 instruct blsiI_rReg_mem(rRegI dst, memory src, immI0 imm_zero, eFlagsReg cr) %{
7994   match(Set dst (AndI (SubI imm_zero (LoadI src) ) (LoadI src) ));
7995   predicate(UseBMI1Instructions);
7996   effect(KILL cr);
7997 
7998   ins_cost(125);
7999   format %{ "BLSIL  $dst, $src" %}
8000 
8001   ins_encode %{
8002     __ blsil($dst$$Register, $src$$Address);
8003   %}
8004   ins_pipe(ialu_reg_mem);
8005 %}
8006 
8007 instruct blsmskI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, eFlagsReg cr)
8008 %{
8009   match(Set dst (XorI (AddI src minus_1) src));
8010   predicate(UseBMI1Instructions);
8011   effect(KILL cr);
8012 
8013   format %{ "BLSMSKL $dst, $src" %}
8014 
8015   ins_encode %{
8016     __ blsmskl($dst$$Register, $src$$Register);
8017   %}
8018 
8019   ins_pipe(ialu_reg);
8020 %}
8021 
8022 instruct blsmskI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, eFlagsReg cr)
8023 %{
8024   match(Set dst (XorI (AddI (LoadI src) minus_1) (LoadI src) ));
8025   predicate(UseBMI1Instructions);
8026   effect(KILL cr);
8027 
8028   ins_cost(125);
8029   format %{ "BLSMSKL $dst, $src" %}
8030 
8031   ins_encode %{
8032     __ blsmskl($dst$$Register, $src$$Address);
8033   %}
8034 
8035   ins_pipe(ialu_reg_mem);
8036 %}
8037 
8038 instruct blsrI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, eFlagsReg cr)
8039 %{
8040   match(Set dst (AndI (AddI src minus_1) src) );
8041   predicate(UseBMI1Instructions);
8042   effect(KILL cr);
8043 
8044   format %{ "BLSRL  $dst, $src" %}
8045 
8046   ins_encode %{
8047     __ blsrl($dst$$Register, $src$$Register);
8048   %}
8049 
8050   ins_pipe(ialu_reg);
8051 %}
8052 
8053 instruct blsrI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, eFlagsReg cr)
8054 %{
8055   match(Set dst (AndI (AddI (LoadI src) minus_1) (LoadI src) ));
8056   predicate(UseBMI1Instructions);
8057   effect(KILL cr);
8058 
8059   ins_cost(125);
8060   format %{ "BLSRL  $dst, $src" %}
8061 
8062   ins_encode %{
8063     __ blsrl($dst$$Register, $src$$Address);
8064   %}
8065 
8066   ins_pipe(ialu_reg_mem);
8067 %}
8068 
8069 // Or Instructions
8070 // Or Register with Register
8071 instruct orI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
8072   match(Set dst (OrI dst src));
8073   effect(KILL cr);
8074 
8075   size(2);
8076   format %{ "OR     $dst,$src" %}
8077   opcode(0x0B);
8078   ins_encode( OpcP, RegReg( dst, src) );
8079   ins_pipe( ialu_reg_reg );
8080 %}
8081 
8082 instruct orI_eReg_castP2X(rRegI dst, eRegP src, eFlagsReg cr) %{
8083   match(Set dst (OrI dst (CastP2X src)));
8084   effect(KILL cr);
8085 
8086   size(2);
8087   format %{ "OR     $dst,$src" %}
8088   opcode(0x0B);
8089   ins_encode( OpcP, RegReg( dst, src) );
8090   ins_pipe( ialu_reg_reg );
8091 %}
8092 
8093 
8094 // Or Register with Immediate
8095 instruct orI_eReg_imm(rRegI dst, immI src, eFlagsReg cr) %{
8096   match(Set dst (OrI dst src));
8097   effect(KILL cr);
8098 
8099   format %{ "OR     $dst,$src" %}
8100   opcode(0x81,0x01);  /* Opcode 81 /1 id */
8101   // ins_encode( RegImm( dst, src) );
8102   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
8103   ins_pipe( ialu_reg );
8104 %}
8105 
8106 // Or Register with Memory
8107 instruct orI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
8108   match(Set dst (OrI dst (LoadI src)));
8109   effect(KILL cr);
8110 
8111   ins_cost(125);
8112   format %{ "OR     $dst,$src" %}
8113   opcode(0x0B);
8114   ins_encode( OpcP, RegMem( dst, src) );
8115   ins_pipe( ialu_reg_mem );
8116 %}
8117 
8118 // Or Memory with Register
8119 instruct orI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
8120   match(Set dst (StoreI dst (OrI (LoadI dst) src)));
8121   effect(KILL cr);
8122 
8123   ins_cost(150);
8124   format %{ "OR     $dst,$src" %}
8125   opcode(0x09);  /* Opcode 09 /r */
8126   ins_encode( OpcP, RegMem( src, dst ) );
8127   ins_pipe( ialu_mem_reg );
8128 %}
8129 
8130 // Or Memory with Immediate
8131 instruct orI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
8132   match(Set dst (StoreI dst (OrI (LoadI dst) src)));
8133   effect(KILL cr);
8134 
8135   ins_cost(125);
8136   format %{ "OR     $dst,$src" %}
8137   opcode(0x81,0x1);  /* Opcode 81 /1 id */
8138   // ins_encode( MemImm( dst, src) );
8139   ins_encode( OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32( src ) );
8140   ins_pipe( ialu_mem_imm );
8141 %}
8142 
8143 // ROL/ROR
8144 // ROL expand
8145 instruct rolI_eReg_imm1(rRegI dst, immI1 shift, eFlagsReg cr) %{
8146   effect(USE_DEF dst, USE shift, KILL cr);
8147 
8148   format %{ "ROL    $dst, $shift" %}
8149   opcode(0xD1, 0x0); /* Opcode D1 /0 */
8150   ins_encode( OpcP, RegOpc( dst ));
8151   ins_pipe( ialu_reg );
8152 %}
8153 
8154 instruct rolI_eReg_imm8(rRegI dst, immI8 shift, eFlagsReg cr) %{
8155   effect(USE_DEF dst, USE shift, KILL cr);
8156 
8157   format %{ "ROL    $dst, $shift" %}
8158   opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
8159   ins_encode( RegOpcImm(dst, shift) );
8160   ins_pipe(ialu_reg);
8161 %}
8162 
8163 instruct rolI_eReg_CL(ncxRegI dst, eCXRegI shift, eFlagsReg cr) %{
8164   effect(USE_DEF dst, USE shift, KILL cr);
8165 
8166   format %{ "ROL    $dst, $shift" %}
8167   opcode(0xD3, 0x0);    /* Opcode D3 /0 */
8168   ins_encode(OpcP, RegOpc(dst));
8169   ins_pipe( ialu_reg_reg );
8170 %}
8171 // end of ROL expand
8172 
8173 // ROL 32bit by one once
8174 instruct rolI_eReg_i1(rRegI dst, immI1 lshift, immI_M1 rshift, eFlagsReg cr) %{
8175   match(Set dst ( OrI (LShiftI dst lshift) (URShiftI dst rshift)));
8176 
8177   expand %{
8178     rolI_eReg_imm1(dst, lshift, cr);
8179   %}
8180 %}
8181 
8182 // ROL 32bit var by imm8 once
8183 instruct rolI_eReg_i8(rRegI dst, immI8 lshift, immI8 rshift, eFlagsReg cr) %{
8184   predicate(  0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
8185   match(Set dst ( OrI (LShiftI dst lshift) (URShiftI dst rshift)));
8186 
8187   expand %{
8188     rolI_eReg_imm8(dst, lshift, cr);
8189   %}
8190 %}
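// The predicate above accepts this pattern only when the two shift counts sum
// to a multiple of 32 (e.g. (x << 3) | (x >>> 29)), i.e. when the OR of the
// two shifts really is a 32-bit rotate-left and can be emitted as a single
// ROL.  The corresponding ROR-by-imm8 rule below uses the same test.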
8191 
8192 // ROL 32bit var by var once
8193 instruct rolI_eReg_Var_C0(ncxRegI dst, eCXRegI shift, immI0 zero, eFlagsReg cr) %{
8194   match(Set dst ( OrI (LShiftI dst shift) (URShiftI dst (SubI zero shift))));
8195 
8196   expand %{
8197     rolI_eReg_CL(dst, shift, cr);
8198   %}
8199 %}
8200 
8201 // ROL 32bit var by var once
8202 instruct rolI_eReg_Var_C32(ncxRegI dst, eCXRegI shift, immI_32 c32, eFlagsReg cr) %{
8203   match(Set dst ( OrI (LShiftI dst shift) (URShiftI dst (SubI c32 shift))));
8204 
8205   expand %{
8206     rolI_eReg_CL(dst, shift, cr);
8207   %}
8208 %}
8209 
8210 // ROR expand
8211 instruct rorI_eReg_imm1(rRegI dst, immI1 shift, eFlagsReg cr) %{
8212   effect(USE_DEF dst, USE shift, KILL cr);
8213 
8214   format %{ "ROR    $dst, $shift" %}
8215   opcode(0xD1,0x1);  /* Opcode D1 /1 */
8216   ins_encode( OpcP, RegOpc( dst ) );
8217   ins_pipe( ialu_reg );
8218 %}
8219 
8220 instruct rorI_eReg_imm8(rRegI dst, immI8 shift, eFlagsReg cr) %{
8221   effect (USE_DEF dst, USE shift, KILL cr);
8222 
8223   format %{ "ROR    $dst, $shift" %}
8224   opcode(0xC1, 0x1); /* Opcode C1 /1 ib */
8225   ins_encode( RegOpcImm(dst, shift) );
8226   ins_pipe( ialu_reg );
8227 %}
8228 
8229 instruct rorI_eReg_CL(ncxRegI dst, eCXRegI shift, eFlagsReg cr)%{
8230   effect(USE_DEF dst, USE shift, KILL cr);
8231 
8232   format %{ "ROR    $dst, $shift" %}
8233   opcode(0xD3, 0x1);    /* Opcode D3 /1 */
8234   ins_encode(OpcP, RegOpc(dst));
8235   ins_pipe( ialu_reg_reg );
8236 %}
8237 // end of ROR expand
8238 
8239 // ROR right once
8240 instruct rorI_eReg_i1(rRegI dst, immI1 rshift, immI_M1 lshift, eFlagsReg cr) %{
8241   match(Set dst ( OrI (URShiftI dst rshift) (LShiftI dst lshift)));
8242 
8243   expand %{
8244     rorI_eReg_imm1(dst, rshift, cr);
8245   %}
8246 %}
8247 
8248 // ROR 32bit by immI8 once
8249 instruct rorI_eReg_i8(rRegI dst, immI8 rshift, immI8 lshift, eFlagsReg cr) %{
8250   predicate(  0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
8251   match(Set dst ( OrI (URShiftI dst rshift) (LShiftI dst lshift)));
8252 
8253   expand %{
8254     rorI_eReg_imm8(dst, rshift, cr);
8255   %}
8256 %}
8257 
8258 // ROR 32bit var by var once
8259 instruct rorI_eReg_Var_C0(ncxRegI dst, eCXRegI shift, immI0 zero, eFlagsReg cr) %{
8260   match(Set dst ( OrI (URShiftI dst shift) (LShiftI dst (SubI zero shift))));
8261 
8262   expand %{
8263     rorI_eReg_CL(dst, shift, cr);
8264   %}
8265 %}
8266 
8267 // ROR 32bit var by var once
8268 instruct rorI_eReg_Var_C32(ncxRegI dst, eCXRegI shift, immI_32 c32, eFlagsReg cr) %{
8269   match(Set dst ( OrI (URShiftI dst shift) (LShiftI dst (SubI c32 shift))));
8270 
8271   expand %{
8272     rorI_eReg_CL(dst, shift, cr);
8273   %}
8274 %}
8275 
8276 // Xor Instructions
8277 // Xor Register with Register
8278 instruct xorI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
8279   match(Set dst (XorI dst src));
8280   effect(KILL cr);
8281 
8282   size(2);
8283   format %{ "XOR    $dst,$src" %}
8284   opcode(0x33);
8285   ins_encode( OpcP, RegReg( dst, src) );
8286   ins_pipe( ialu_reg_reg );
8287 %}
8288 
8289 // Xor Register with Immediate -1
8290 instruct xorI_eReg_im1(rRegI dst, immI_M1 imm) %{
8291   match(Set dst (XorI dst imm));  
8292 
8293   size(2);
8294   format %{ "NOT    $dst" %}  
8295   ins_encode %{
8296      __ notl($dst$$Register);
8297   %}
8298   ins_pipe( ialu_reg );
8299 %}
8300 
8301 // Xor Register with Immediate
8302 instruct xorI_eReg_imm(rRegI dst, immI src, eFlagsReg cr) %{
8303   match(Set dst (XorI dst src));
8304   effect(KILL cr);
8305 
8306   format %{ "XOR    $dst,$src" %}
8307   opcode(0x81,0x06);  /* Opcode 81 /6 id */
8308   // ins_encode( RegImm( dst, src) );
8309   ins_encode( OpcSErm( dst, src ), Con8or32( src ) );
8310   ins_pipe( ialu_reg );
8311 %}
8312 
8313 // Xor Register with Memory
8314 instruct xorI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
8315   match(Set dst (XorI dst (LoadI src)));
8316   effect(KILL cr);
8317 
8318   ins_cost(125);
8319   format %{ "XOR    $dst,$src" %}
8320   opcode(0x33);
8321   ins_encode( OpcP, RegMem(dst, src) );
8322   ins_pipe( ialu_reg_mem );
8323 %}
8324 
8325 // Xor Memory with Register
8326 instruct xorI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
8327   match(Set dst (StoreI dst (XorI (LoadI dst) src)));
8328   effect(KILL cr);
8329 
8330   ins_cost(150);
8331   format %{ "XOR    $dst,$src" %}
8332   opcode(0x31);  /* Opcode 31 /r */
8333   ins_encode( OpcP, RegMem( src, dst ) );
8334   ins_pipe( ialu_mem_reg );
8335 %}
8336 
8337 // Xor Memory with Immediate
8338 instruct xorI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
8339   match(Set dst (StoreI dst (XorI (LoadI dst) src)));
8340   effect(KILL cr);
8341 
8342   ins_cost(125);
8343   format %{ "XOR    $dst,$src" %}
8344   opcode(0x81,0x6);  /* Opcode 81 /6 id */
8345   ins_encode( OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32( src ) );
8346   ins_pipe( ialu_mem_imm );
8347 %}
8348 
8349 //----------Convert Int to Boolean---------------------------------------------
8350 
8351 instruct movI_nocopy(rRegI dst, rRegI src) %{
8352   effect( DEF dst, USE src );
8353   format %{ "MOV    $dst,$src" %}
8354   ins_encode( enc_Copy( dst, src) );
8355   ins_pipe( ialu_reg_reg );
8356 %}
8357 
8358 instruct ci2b( rRegI dst, rRegI src, eFlagsReg cr ) %{
8359   effect( USE_DEF dst, USE src, KILL cr );
8360 
8361   size(4);
8362   format %{ "NEG    $dst\n\t"
8363             "ADC    $dst,$src" %}
8364   ins_encode( neg_reg(dst),
8365               OpcRegReg(0x13,dst,src) );
8366   ins_pipe( ialu_reg_reg_long );
8367 %}
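// Note: convI2B below first copies $src into $dst (movI_nocopy), so at this
// point $dst == $src.  NEG sets CF exactly when the value is non-zero, and
// ADC then computes (-x) + x + CF = CF, yielding 1 for any non-zero input and
// 0 for zero -- the required Conv2B result.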
8368 
8369 instruct convI2B( rRegI dst, rRegI src, eFlagsReg cr ) %{
8370   match(Set dst (Conv2B src));
8371 
8372   expand %{
8373     movI_nocopy(dst,src);
8374     ci2b(dst,src,cr);
8375   %}
8376 %}
8377 
8378 instruct movP_nocopy(rRegI dst, eRegP src) %{
8379   effect( DEF dst, USE src );
8380   format %{ "MOV    $dst,$src" %}
8381   ins_encode( enc_Copy( dst, src) );
8382   ins_pipe( ialu_reg_reg );
8383 %}
8384 
8385 instruct cp2b( rRegI dst, eRegP src, eFlagsReg cr ) %{
8386   effect( USE_DEF dst, USE src, KILL cr );
8387   format %{ "NEG    $dst\n\t"
8388             "ADC    $dst,$src" %}
8389   ins_encode( neg_reg(dst),
8390               OpcRegReg(0x13,dst,src) );
8391   ins_pipe( ialu_reg_reg_long );
8392 %}
8393 
8394 instruct convP2B( rRegI dst, eRegP src, eFlagsReg cr ) %{
8395   match(Set dst (Conv2B src));
8396 
8397   expand %{
8398     movP_nocopy(dst,src);
8399     cp2b(dst,src,cr);
8400   %}
8401 %}
8402 
8403 instruct cmpLTMask(eCXRegI dst, ncxRegI p, ncxRegI q, eFlagsReg cr) %{
8404   match(Set dst (CmpLTMask p q));
8405   effect(KILL cr);
8406   ins_cost(400);
8407 
8408   // SETlt can only use the low byte of EAX, EBX, ECX, or EDX as destination
8409   format %{ "XOR    $dst,$dst\n\t"
8410             "CMP    $p,$q\n\t"
8411             "SETlt  $dst\n\t"
8412             "NEG    $dst" %}
8413   ins_encode %{
8414     Register Rp = $p$$Register;
8415     Register Rq = $q$$Register;
8416     Register Rd = $dst$$Register;
8417     Label done;
8418     __ xorl(Rd, Rd);
8419     __ cmpl(Rp, Rq);
8420     __ setb(Assembler::less, Rd);
8421     __ negl(Rd);
8422   %}
8423 
8424   ins_pipe(pipe_slow);
8425 %}
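// The sequence above produces $dst = -1 when $p < $q (signed) and 0 otherwise:
// SETlt deposits 0 or 1 into the low byte of the zeroed destination and the
// final NEG turns a 1 into the all-ones mask expected from CmpLTMask.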
8426 
8427 instruct cmpLTMask0(rRegI dst, immI0 zero, eFlagsReg cr) %{
8428   match(Set dst (CmpLTMask dst zero));
8429   effect(DEF dst, KILL cr);
8430   ins_cost(100);
8431 
8432   format %{ "SAR    $dst,31\t# cmpLTMask0" %}
8433   ins_encode %{
8434   __ sarl($dst$$Register, 31);
8435   %}
8436   ins_pipe(ialu_reg);
8437 %}
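// SAR by 31 replicates the sign bit across the register, so a negative $dst
// becomes -1 and a non-negative $dst becomes 0 -- CmpLTMask against zero.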
8438 
8439 /* better to save a register than avoid a branch */
8440 instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, eFlagsReg cr) %{
8441   match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
8442   effect(KILL cr);
8443   ins_cost(400);
8444   format %{ "SUB    $p,$q\t# cadd_cmpLTMask\n\t"
8445             "JGE    done\n\t"
8446             "ADD    $p,$y\n"
8447             "done:  " %}
8448   ins_encode %{
8449     Register Rp = $p$$Register;
8450     Register Rq = $q$$Register;
8451     Register Ry = $y$$Register;
8452     Label done;
8453     __ subl(Rp, Rq);
8454     __ jccb(Assembler::greaterEqual, done);
8455     __ addl(Rp, Ry);
8456     __ bind(done);
8457   %}
8458 
8459   ins_pipe(pipe_cmplt);
8460 %}
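// Net effect: p = (p < q) ? (p - q + y) : (p - q).  Subtracting first and
// conditionally adding $y back avoids materializing the -1/0 mask in a
// register, trading it for the short forward branch noted above.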
8461 
8462 /* better to save a register than avoid a branch */
8463 instruct and_cmpLTMask(rRegI p, rRegI q, rRegI y, eFlagsReg cr) %{
8464   match(Set y (AndI (CmpLTMask p q) y));
8465   effect(KILL cr);
8466 
8467   ins_cost(300);
8468 
8469   format %{ "CMPL     $p, $q\t# and_cmpLTMask\n\t"
8470             "JLT      done\n\t"
8471             "XORL     $y, $y\n"
8472             "done:  " %}
8473   ins_encode %{
8474     Register Rp = $p$$Register;
8475     Register Rq = $q$$Register;
8476     Register Ry = $y$$Register;
8477     Label done;
8478     __ cmpl(Rp, Rq);
8479     __ jccb(Assembler::less, done);
8480     __ xorl(Ry, Ry);
8481     __ bind(done);
8482   %}
8483 
8484   ins_pipe(pipe_cmplt);
8485 %}
8486 
8487 /* If I enable this, I encourage spilling in the inner loop of compress.
8488 instruct cadd_cmpLTMask_mem(ncxRegI p, ncxRegI q, memory y, eCXRegI tmp, eFlagsReg cr) %{
8489   match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
8490 */
8491 //----------Overflow Math Instructions-----------------------------------------
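// These rules only produce condition codes: each OverflowAddI/SubI/MulI node
// is matched by performing the actual ADD/CMP/NEG/IMUL and leaving the
// overflow flag in EFLAGS for the consuming branch or cmov to test.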
8492 
8493 instruct overflowAddI_eReg(eFlagsReg cr, eAXRegI op1, rRegI op2)
8494 %{
8495   match(Set cr (OverflowAddI op1 op2));
8496   effect(DEF cr, USE_KILL op1, USE op2);
8497 
8498   format %{ "ADD    $op1, $op2\t# overflow check int" %}
8499 
8500   ins_encode %{
8501     __ addl($op1$$Register, $op2$$Register);
8502   %}
8503   ins_pipe(ialu_reg_reg);
8504 %}
8505 
8506 instruct overflowAddI_rReg_imm(eFlagsReg cr, eAXRegI op1, immI op2)
8507 %{
8508   match(Set cr (OverflowAddI op1 op2));
8509   effect(DEF cr, USE_KILL op1, USE op2);
8510 
8511   format %{ "ADD    $op1, $op2\t# overflow check int" %}
8512 
8513   ins_encode %{
8514     __ addl($op1$$Register, $op2$$constant);
8515   %}
8516   ins_pipe(ialu_reg_reg);
8517 %}
8518 
8519 instruct overflowSubI_rReg(eFlagsReg cr, rRegI op1, rRegI op2)
8520 %{
8521   match(Set cr (OverflowSubI op1 op2));
8522 
8523   format %{ "CMP    $op1, $op2\t# overflow check int" %}
8524   ins_encode %{
8525     __ cmpl($op1$$Register, $op2$$Register);
8526   %}
8527   ins_pipe(ialu_reg_reg);
8528 %}
8529 
8530 instruct overflowSubI_rReg_imm(eFlagsReg cr, rRegI op1, immI op2)
8531 %{
8532   match(Set cr (OverflowSubI op1 op2));
8533 
8534   format %{ "CMP    $op1, $op2\t# overflow check int" %}
8535   ins_encode %{
8536     __ cmpl($op1$$Register, $op2$$constant);
8537   %}
8538   ins_pipe(ialu_reg_reg);
8539 %}
8540 
8541 instruct overflowNegI_rReg(eFlagsReg cr, immI0 zero, eAXRegI op2)
8542 %{
8543   match(Set cr (OverflowSubI zero op2));
8544   effect(DEF cr, USE_KILL op2);
8545 
8546   format %{ "NEG    $op2\t# overflow check int" %}
8547   ins_encode %{
8548     __ negl($op2$$Register);
8549   %}
8550   ins_pipe(ialu_reg_reg);
8551 %}
8552 
8553 instruct overflowMulI_rReg(eFlagsReg cr, eAXRegI op1, rRegI op2)
8554 %{
8555   match(Set cr (OverflowMulI op1 op2));
8556   effect(DEF cr, USE_KILL op1, USE op2);
8557 
8558   format %{ "IMUL    $op1, $op2\t# overflow check int" %}
8559   ins_encode %{
8560     __ imull($op1$$Register, $op2$$Register);
8561   %}
8562   ins_pipe(ialu_reg_reg_alu0);
8563 %}
8564 
8565 instruct overflowMulI_rReg_imm(eFlagsReg cr, rRegI op1, immI op2, rRegI tmp)
8566 %{
8567   match(Set cr (OverflowMulI op1 op2));
8568   effect(DEF cr, TEMP tmp, USE op1, USE op2);
8569 
8570   format %{ "IMUL    $tmp, $op1, $op2\t# overflow check int" %}
8571   ins_encode %{
8572     __ imull($tmp$$Register, $op1$$Register, $op2$$constant);
8573   %}
8574   ins_pipe(ialu_reg_reg_alu0);
8575 %}
8576 
8577 //----------Long Instructions------------------------------------------------
8578 // Add Long Register with Register
8579 instruct addL_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
8580   match(Set dst (AddL dst src));
8581   effect(KILL cr);
8582   ins_cost(200);
8583   format %{ "ADD    $dst.lo,$src.lo\n\t"
8584             "ADC    $dst.hi,$src.hi" %}
8585   opcode(0x03, 0x13);
8586   ins_encode( RegReg_Lo(dst, src), RegReg_Hi(dst,src) );
8587   ins_pipe( ialu_reg_reg_long );
8588 %}
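// A 64-bit add on 32-bit x86 is split into an ADD of the low halves followed
// by an ADC of the high halves so the carry propagates; the SUB/SBB pairs
// further down handle long subtraction the same way.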
8589 
8590 // Add Long Register with Immediate
8591 instruct addL_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{
8592   match(Set dst (AddL dst src));
8593   effect(KILL cr);
8594   format %{ "ADD    $dst.lo,$src.lo\n\t"
8595             "ADC    $dst.hi,$src.hi" %}
8596   opcode(0x81,0x00,0x02);  /* Opcode 81 /0, 81 /2 */
8597   ins_encode( Long_OpcSErm_Lo( dst, src ), Long_OpcSErm_Hi( dst, src ) );
8598   ins_pipe( ialu_reg_long );
8599 %}
8600 
8601 // Add Long Register with Memory
8602 instruct addL_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
8603   match(Set dst (AddL dst (LoadL mem)));
8604   effect(KILL cr);
8605   ins_cost(125);
8606   format %{ "ADD    $dst.lo,$mem\n\t"
8607             "ADC    $dst.hi,$mem+4" %}
8608   opcode(0x03, 0x13);
8609   ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
8610   ins_pipe( ialu_reg_long_mem );
8611 %}
8612 
8613 // Subtract Long Register with Register.
8614 instruct subL_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
8615   match(Set dst (SubL dst src));
8616   effect(KILL cr);
8617   ins_cost(200);
8618   format %{ "SUB    $dst.lo,$src.lo\n\t"
8619             "SBB    $dst.hi,$src.hi" %}
8620   opcode(0x2B, 0x1B);
8621   ins_encode( RegReg_Lo(dst, src), RegReg_Hi(dst,src) );
8622   ins_pipe( ialu_reg_reg_long );
8623 %}
8624 
8625 // Subtract Long Register with Immediate
8626 instruct subL_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{
8627   match(Set dst (SubL dst src));
8628   effect(KILL cr);
8629   format %{ "SUB    $dst.lo,$src.lo\n\t"
8630             "SBB    $dst.hi,$src.hi" %}
8631   opcode(0x81,0x05,0x03);  /* Opcode 81 /5, 81 /3 */
8632   ins_encode( Long_OpcSErm_Lo( dst, src ), Long_OpcSErm_Hi( dst, src ) );
8633   ins_pipe( ialu_reg_long );
8634 %}
8635 
8636 // Subtract Long Register with Memory
8637 instruct subL_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
8638   match(Set dst (SubL dst (LoadL mem)));
8639   effect(KILL cr);
8640   ins_cost(125);
8641   format %{ "SUB    $dst.lo,$mem\n\t"
8642             "SBB    $dst.hi,$mem+4" %}
8643   opcode(0x2B, 0x1B);
8644   ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
8645   ins_pipe( ialu_reg_long_mem );
8646 %}
8647 
8648 instruct negL_eReg(eRegL dst, immL0 zero, eFlagsReg cr) %{
8649   match(Set dst (SubL zero dst));
8650   effect(KILL cr);
8651   ins_cost(300);
8652   format %{ "NEG    $dst.hi\n\tNEG    $dst.lo\n\tSBB    $dst.hi,0" %}
8653   ins_encode( neg_long(dst) );
8654   ins_pipe( ialu_reg_reg_long );
8655 %}
8656 
8657 // And Long Register with Register
8658 instruct andL_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
8659   match(Set dst (AndL dst src));
8660   effect(KILL cr);
8661   format %{ "AND    $dst.lo,$src.lo\n\t"
8662             "AND    $dst.hi,$src.hi" %}
8663   opcode(0x23,0x23);
8664   ins_encode( RegReg_Lo( dst, src), RegReg_Hi( dst, src) );
8665   ins_pipe( ialu_reg_reg_long );
8666 %}
8667 
8668 // And Long Register with Immediate
8669 instruct andL_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{
8670   match(Set dst (AndL dst src));
8671   effect(KILL cr);
8672   format %{ "AND    $dst.lo,$src.lo\n\t"
8673             "AND    $dst.hi,$src.hi" %}
8674   opcode(0x81,0x04,0x04);  /* Opcode 81 /4, 81 /4 */
8675   ins_encode( Long_OpcSErm_Lo( dst, src ), Long_OpcSErm_Hi( dst, src ) );
8676   ins_pipe( ialu_reg_long );
8677 %}
8678 
8679 // And Long Register with Memory
8680 instruct andL_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
8681   match(Set dst (AndL dst (LoadL mem)));
8682   effect(KILL cr);
8683   ins_cost(125);
8684   format %{ "AND    $dst.lo,$mem\n\t"
8685             "AND    $dst.hi,$mem+4" %}
8686   opcode(0x23, 0x23);
8687   ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
8688   ins_pipe( ialu_reg_long_mem );
8689 %}
8690 
8691 // BMI1 instructions
8692 instruct andnL_eReg_eReg_eReg(eRegL dst, eRegL src1, eRegL src2, immL_M1 minus_1, eFlagsReg cr) %{
8693   match(Set dst (AndL (XorL src1 minus_1) src2));
8694   predicate(UseBMI1Instructions);
8695   effect(KILL cr, TEMP dst);
8696 
8697   format %{ "ANDNL  $dst.lo, $src1.lo, $src2.lo\n\t"
8698             "ANDNL  $dst.hi, $src1.hi, $src2.hi"
8699          %}
8700 
8701   ins_encode %{
8702     Register Rdst = $dst$$Register;
8703     Register Rsrc1 = $src1$$Register;
8704     Register Rsrc2 = $src2$$Register;
8705     __ andnl(Rdst, Rsrc1, Rsrc2);
8706     __ andnl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc1), HIGH_FROM_LOW(Rsrc2));
8707   %}
8708   ins_pipe(ialu_reg_reg_long);
8709 %}
8710 
8711 instruct andnL_eReg_eReg_mem(eRegL dst, eRegL src1, memory src2, immL_M1 minus_1, eFlagsReg cr) %{
8712   match(Set dst (AndL (XorL src1 minus_1) (LoadL src2) ));
8713   predicate(UseBMI1Instructions);
8714   effect(KILL cr, TEMP dst);
8715 
8716   ins_cost(125);
8717   format %{ "ANDNL  $dst.lo, $src1.lo, $src2\n\t"
8718             "ANDNL  $dst.hi, $src1.hi, $src2+4"
8719          %}
8720 
8721   ins_encode %{
8722     Register Rdst = $dst$$Register;
8723     Register Rsrc1 = $src1$$Register;
8724     Address src2_hi = Address::make_raw($src2$$base, $src2$$index, $src2$$scale, $src2$$disp + 4, relocInfo::none);
8725 
8726     __ andnl(Rdst, Rsrc1, $src2$$Address);
8727     __ andnl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc1), src2_hi);
8728   %}
8729   ins_pipe(ialu_reg_mem);
8730 %}
8731 
8732 instruct blsiL_eReg_eReg(eRegL dst, eRegL src, immL0 imm_zero, eFlagsReg cr) %{
8733   match(Set dst (AndL (SubL imm_zero src) src));
8734   predicate(UseBMI1Instructions);
8735   effect(KILL cr, TEMP dst);
8736 
8737   format %{ "MOVL   $dst.hi, 0\n\t"
8738             "BLSIL  $dst.lo, $src.lo\n\t"
8739             "JNZ    done\n\t"
8740             "BLSIL  $dst.hi, $src.hi\n"
8741             "done:"
8742          %}
8743 
8744   ins_encode %{
8745     Label done;
8746     Register Rdst = $dst$$Register;
8747     Register Rsrc = $src$$Register;
8748     __ movl(HIGH_FROM_LOW(Rdst), 0);
8749     __ blsil(Rdst, Rsrc);
8750     __ jccb(Assembler::notZero, done);
8751     __ blsil(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
8752     __ bind(done);
8753   %}
8754   ins_pipe(ialu_reg);
8755 %}
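// The 64-bit "isolate lowest set bit" is composed from two 32-bit BLSIs: if
// the low half produced a non-zero result (JNZ), the lowest set bit is in the
// low word and the high word of the result stays 0; otherwise the low word of
// $src was zero and the same operation is applied to the high word.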
8756 
8757 instruct blsiL_eReg_mem(eRegL dst, memory src, immL0 imm_zero, eFlagsReg cr) %{
8758   match(Set dst (AndL (SubL imm_zero (LoadL src) ) (LoadL src) ));
8759   predicate(UseBMI1Instructions);
8760   effect(KILL cr, TEMP dst);
8761 
8762   ins_cost(125);
8763   format %{ "MOVL   $dst.hi, 0\n\t"
8764             "BLSIL  $dst.lo, $src\n\t"
8765             "JNZ    done\n\t"
8766             "BLSIL  $dst.hi, $src+4\n"
8767             "done:"
8768          %}
8769 
8770   ins_encode %{
8771     Label done;
8772     Register Rdst = $dst$$Register;
8773     Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);
8774 
8775     __ movl(HIGH_FROM_LOW(Rdst), 0);
8776     __ blsil(Rdst, $src$$Address);
8777     __ jccb(Assembler::notZero, done);
8778     __ blsil(HIGH_FROM_LOW(Rdst), src_hi);
8779     __ bind(done);
8780   %}
8781   ins_pipe(ialu_reg_mem);
8782 %}
8783 
8784 instruct blsmskL_eReg_eReg(eRegL dst, eRegL src, immL_M1 minus_1, eFlagsReg cr)
8785 %{
8786   match(Set dst (XorL (AddL src minus_1) src));
8787   predicate(UseBMI1Instructions);
8788   effect(KILL cr, TEMP dst);
8789 
8790   format %{ "MOVL    $dst.hi, 0\n\t"
8791             "BLSMSKL $dst.lo, $src.lo\n\t"
8792             "JNC     done\n\t"
8793             "BLSMSKL $dst.hi, $src.hi\n"
8794             "done:"
8795          %}
8796 
8797   ins_encode %{
8798     Label done;
8799     Register Rdst = $dst$$Register;
8800     Register Rsrc = $src$$Register;
8801     __ movl(HIGH_FROM_LOW(Rdst), 0);
8802     __ blsmskl(Rdst, Rsrc);
8803     __ jccb(Assembler::carryClear, done);
8804     __ blsmskl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
8805     __ bind(done);
8806   %}
8807 
8808   ins_pipe(ialu_reg);
8809 %}
8810 
8811 instruct blsmskL_eReg_mem(eRegL dst, memory src, immL_M1 minus_1, eFlagsReg cr)
8812 %{
8813   match(Set dst (XorL (AddL (LoadL src) minus_1) (LoadL src) ));
8814   predicate(UseBMI1Instructions);
8815   effect(KILL cr, TEMP dst);
8816 
8817   ins_cost(125);
8818   format %{ "MOVL    $dst.hi, 0\n\t"
8819             "BLSMSKL $dst.lo, $src\n\t"
8820             "JNC     done\n\t"
8821             "BLSMSKL $dst.hi, $src+4\n"
8822             "done:"
8823          %}
8824 
8825   ins_encode %{
8826     Label done;
8827     Register Rdst = $dst$$Register;
8828     Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);
8829 
8830     __ movl(HIGH_FROM_LOW(Rdst), 0);
8831     __ blsmskl(Rdst, $src$$Address);
8832     __ jccb(Assembler::carryClear, done);
8833     __ blsmskl(HIGH_FROM_LOW(Rdst), src_hi);
8834     __ bind(done);
8835   %}
8836 
8837   ins_pipe(ialu_reg_mem);
8838 %}
8839 
8840 instruct blsrL_eReg_eReg(eRegL dst, eRegL src, immL_M1 minus_1, eFlagsReg cr)
8841 %{
8842   match(Set dst (AndL (AddL src minus_1) src) );
8843   predicate(UseBMI1Instructions);
8844   effect(KILL cr, TEMP dst);
8845 
8846   format %{ "MOVL   $dst.hi, $src.hi\n\t"
8847             "BLSRL  $dst.lo, $src.lo\n\t"
8848             "JNC    done\n\t"
8849             "BLSRL  $dst.hi, $src.hi\n"
8850             "done:"
8851   %}
8852 
8853   ins_encode %{
8854     Label done;
8855     Register Rdst = $dst$$Register;
8856     Register Rsrc = $src$$Register;
8857     __ movl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
8858     __ blsrl(Rdst, Rsrc);
8859     __ jccb(Assembler::carryClear, done);
8860     __ blsrl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
8861     __ bind(done);
8862   %}
8863 
8864   ins_pipe(ialu_reg);
8865 %}
8866 
8867 instruct blsrL_eReg_mem(eRegL dst, memory src, immL_M1 minus_1, eFlagsReg cr)
8868 %{
8869   match(Set dst (AndL (AddL (LoadL src) minus_1) (LoadL src) ));
8870   predicate(UseBMI1Instructions);
8871   effect(KILL cr, TEMP dst);
8872 
8873   ins_cost(125);
8874   format %{ "MOVL   $dst.hi, $src+4\n\t"
8875             "BLSRL  $dst.lo, $src\n\t"
8876             "JNC    done\n\t"
8877             "BLSRL  $dst.hi, $src+4\n"
8878             "done:"
8879   %}
8880 
8881   ins_encode %{
8882     Label done;
8883     Register Rdst = $dst$$Register;
8884     Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);
8885     __ movl(HIGH_FROM_LOW(Rdst), src_hi);
8886     __ blsrl(Rdst, $src$$Address);
8887     __ jccb(Assembler::carryClear, done);
8888     __ blsrl(HIGH_FROM_LOW(Rdst), src_hi);
8889     __ bind(done);
8890   %}
8891 
8892   ins_pipe(ialu_reg_mem);
8893 %}
8894 
8895 // Or Long Register with Register
8896 instruct orl_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
8897   match(Set dst (OrL dst src));
8898   effect(KILL cr);
8899   format %{ "OR     $dst.lo,$src.lo\n\t"
8900             "OR     $dst.hi,$src.hi" %}
8901   opcode(0x0B,0x0B);
8902   ins_encode( RegReg_Lo( dst, src), RegReg_Hi( dst, src) );
8903   ins_pipe( ialu_reg_reg_long );
8904 %}
8905 
8906 // Or Long Register with Immediate
8907 instruct orl_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{
8908   match(Set dst (OrL dst src));
8909   effect(KILL cr);
8910   format %{ "OR     $dst.lo,$src.lo\n\t"
8911             "OR     $dst.hi,$src.hi" %}
8912   opcode(0x81,0x01,0x01);  /* Opcode 81 /1, 81 /1 */
8913   ins_encode( Long_OpcSErm_Lo( dst, src ), Long_OpcSErm_Hi( dst, src ) );
8914   ins_pipe( ialu_reg_long );
8915 %}
8916 
8917 // Or Long Register with Memory
8918 instruct orl_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
8919   match(Set dst (OrL dst (LoadL mem)));
8920   effect(KILL cr);
8921   ins_cost(125);
8922   format %{ "OR     $dst.lo,$mem\n\t"
8923             "OR     $dst.hi,$mem+4" %}
8924   opcode(0x0B,0x0B);
8925   ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
8926   ins_pipe( ialu_reg_long_mem );
8927 %}
8928 
8929 // Xor Long Register with Register
8930 instruct xorl_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
8931   match(Set dst (XorL dst src));
8932   effect(KILL cr);
8933   format %{ "XOR    $dst.lo,$src.lo\n\t"
8934             "XOR    $dst.hi,$src.hi" %}
8935   opcode(0x33,0x33);
8936   ins_encode( RegReg_Lo( dst, src), RegReg_Hi( dst, src) );
8937   ins_pipe( ialu_reg_reg_long );
8938 %}
8939 
8940 // Xor Long Register with Immediate -1
8941 instruct xorl_eReg_im1(eRegL dst, immL_M1 imm) %{
8942   match(Set dst (XorL dst imm));  
8943   format %{ "NOT    $dst.lo\n\t"
8944             "NOT    $dst.hi" %}
8945   ins_encode %{
8946      __ notl($dst$$Register);
8947      __ notl(HIGH_FROM_LOW($dst$$Register));
8948   %}
8949   ins_pipe( ialu_reg_long );
8950 %}
8951 
8952 // Xor Long Register with Immediate
8953 instruct xorl_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{
8954   match(Set dst (XorL dst src));
8955   effect(KILL cr);
8956   format %{ "XOR    $dst.lo,$src.lo\n\t"
8957             "XOR    $dst.hi,$src.hi" %}
8958   opcode(0x81,0x06,0x06);  /* Opcode 81 /6, 81 /6 */
8959   ins_encode( Long_OpcSErm_Lo( dst, src ), Long_OpcSErm_Hi( dst, src ) );
8960   ins_pipe( ialu_reg_long );
8961 %}
8962 
8963 // Xor Long Register with Memory
8964 instruct xorl_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
8965   match(Set dst (XorL dst (LoadL mem)));
8966   effect(KILL cr);
8967   ins_cost(125);
8968   format %{ "XOR    $dst.lo,$mem\n\t"
8969             "XOR    $dst.hi,$mem+4" %}
8970   opcode(0x33,0x33);
8971   ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
8972   ins_pipe( ialu_reg_long_mem );
8973 %}
8974 
8975 // Shift Left Long by 1
8976 instruct shlL_eReg_1(eRegL dst, immI_1 cnt, eFlagsReg cr) %{
8977   predicate(UseNewLongLShift);
8978   match(Set dst (LShiftL dst cnt));
8979   effect(KILL cr);
8980   ins_cost(100);
8981   format %{ "ADD    $dst.lo,$dst.lo\n\t"
8982             "ADC    $dst.hi,$dst.hi" %}
8983   ins_encode %{
8984     __ addl($dst$$Register,$dst$$Register);
8985     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
8986   %}
8987   ins_pipe( ialu_reg_long );
8988 %}
8989 
8990 // Shift Left Long by 2
8991 instruct shlL_eReg_2(eRegL dst, immI_2 cnt, eFlagsReg cr) %{
8992   predicate(UseNewLongLShift);
8993   match(Set dst (LShiftL dst cnt));
8994   effect(KILL cr);
8995   ins_cost(100);
8996   format %{ "ADD    $dst.lo,$dst.lo\n\t"
8997             "ADC    $dst.hi,$dst.hi\n\t" 
8998             "ADD    $dst.lo,$dst.lo\n\t"
8999             "ADC    $dst.hi,$dst.hi" %}
9000   ins_encode %{
9001     __ addl($dst$$Register,$dst$$Register);
9002     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
9003     __ addl($dst$$Register,$dst$$Register);
9004     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
9005   %}
9006   ins_pipe( ialu_reg_long );
9007 %}
9008 
9009 // Shift Left Long by 3
9010 instruct shlL_eReg_3(eRegL dst, immI_3 cnt, eFlagsReg cr) %{
9011   predicate(UseNewLongLShift);
9012   match(Set dst (LShiftL dst cnt));
9013   effect(KILL cr);
9014   ins_cost(100);
9015   format %{ "ADD    $dst.lo,$dst.lo\n\t"
9016             "ADC    $dst.hi,$dst.hi\n\t" 
9017             "ADD    $dst.lo,$dst.lo\n\t"
9018             "ADC    $dst.hi,$dst.hi\n\t" 
9019             "ADD    $dst.lo,$dst.lo\n\t"
9020             "ADC    $dst.hi,$dst.hi" %}
9021   ins_encode %{
9022     __ addl($dst$$Register,$dst$$Register);
9023     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
9024     __ addl($dst$$Register,$dst$$Register);
9025     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
9026     __ addl($dst$$Register,$dst$$Register);
9027     __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register));
9028   %}
9029   ins_pipe( ialu_reg_long );
9030 %}
9031 
9032 // Shift Left Long by 1-31
9033 instruct shlL_eReg_1_31(eRegL dst, immI_1_31 cnt, eFlagsReg cr) %{
9034   match(Set dst (LShiftL dst cnt));
9035   effect(KILL cr);
9036   ins_cost(200);
9037   format %{ "SHLD   $dst.hi,$dst.lo,$cnt\n\t"
9038             "SHL    $dst.lo,$cnt" %}
9039   opcode(0xC1, 0x4, 0xA4);  /* 0F/A4, then C1 /4 ib */
9040   ins_encode( move_long_small_shift(dst,cnt) );
9041   ins_pipe( ialu_reg_long );
9042 %}
9043 
9044 // Shift Left Long by 32-63
9045 instruct shlL_eReg_32_63(eRegL dst, immI_32_63 cnt, eFlagsReg cr) %{
9046   match(Set dst (LShiftL dst cnt));
9047   effect(KILL cr);
9048   ins_cost(300);
9049   format %{ "MOV    $dst.hi,$dst.lo\n"
9050           "\tSHL    $dst.hi,$cnt-32\n"
9051           "\tXOR    $dst.lo,$dst.lo" %}
9052   opcode(0xC1, 0x4);  /* C1 /4 ib */
9053   ins_encode( move_long_big_shift_clr(dst,cnt) );
9054   ins_pipe( ialu_reg_long );
9055 %}
9056 
9057 // Shift Left Long by variable
9058 instruct salL_eReg_CL(eRegL dst, eCXRegI shift, eFlagsReg cr) %{
9059   match(Set dst (LShiftL dst shift));
9060   effect(KILL cr);
9061   ins_cost(500+200);
9062   size(17);
9063   format %{ "TEST   $shift,32\n\t"
9064             "JEQ,s  small\n\t"
9065             "MOV    $dst.hi,$dst.lo\n\t"
9066             "XOR    $dst.lo,$dst.lo\n"
9067     "small:\tSHLD   $dst.hi,$dst.lo,$shift\n\t"
9068             "SHL    $dst.lo,$shift" %}
9069   ins_encode( shift_left_long( dst, shift ) );
9070   ins_pipe( pipe_slow );
9071 %}
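// For shift counts of 32 or more, the low word is first moved into the high
// word and the low word is cleared; the SHLD/SHL pair then shifts by the
// remaining (count mod 32) bits, since the hardware masks the CL count to
// 5 bits for 32-bit operands.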
9072 
9073 // Shift Right Long by 1-31
9074 instruct shrL_eReg_1_31(eRegL dst, immI_1_31 cnt, eFlagsReg cr) %{
9075   match(Set dst (URShiftL dst cnt));
9076   effect(KILL cr);
9077   ins_cost(200);
9078   format %{ "SHRD   $dst.lo,$dst.hi,$cnt\n\t"
9079             "SHR    $dst.hi,$cnt" %}
9080   opcode(0xC1, 0x5, 0xAC);  /* 0F/AC, then C1 /5 ib */
9081   ins_encode( move_long_small_shift(dst,cnt) );
9082   ins_pipe( ialu_reg_long );
9083 %}
9084 
9085 // Shift Right Long by 32-63
9086 instruct shrL_eReg_32_63(eRegL dst, immI_32_63 cnt, eFlagsReg cr) %{
9087   match(Set dst (URShiftL dst cnt));
9088   effect(KILL cr);
9089   ins_cost(300);
9090   format %{ "MOV    $dst.lo,$dst.hi\n"
9091           "\tSHR    $dst.lo,$cnt-32\n"
9092           "\tXOR    $dst.hi,$dst.hi" %}
9093   opcode(0xC1, 0x5);  /* C1 /5 ib */
9094   ins_encode( move_long_big_shift_clr(dst,cnt) );
9095   ins_pipe( ialu_reg_long );
9096 %}
9097 
9098 // Shift Right Long by variable
9099 instruct shrL_eReg_CL(eRegL dst, eCXRegI shift, eFlagsReg cr) %{
9100   match(Set dst (URShiftL dst shift));
9101   effect(KILL cr);
9102   ins_cost(600);
9103   size(17);
9104   format %{ "TEST   $shift,32\n\t"
9105             "JEQ,s  small\n\t"
9106             "MOV    $dst.lo,$dst.hi\n\t"
9107             "XOR    $dst.hi,$dst.hi\n"
9108     "small:\tSHRD   $dst.lo,$dst.hi,$shift\n\t"
9109             "SHR    $dst.hi,$shift" %}
9110   ins_encode( shift_right_long( dst, shift ) );
9111   ins_pipe( pipe_slow );
9112 %}
9113 
9114 // Shift Right arithmetic Long by 1-31
9115 instruct sarL_eReg_1_31(eRegL dst, immI_1_31 cnt, eFlagsReg cr) %{
9116   match(Set dst (RShiftL dst cnt));
9117   effect(KILL cr);
9118   ins_cost(200);
9119   format %{ "SHRD   $dst.lo,$dst.hi,$cnt\n\t"
9120             "SAR    $dst.hi,$cnt" %}
9121   opcode(0xC1, 0x7, 0xAC);  /* 0F/AC, then C1 /7 ib */
9122   ins_encode( move_long_small_shift(dst,cnt) );
9123   ins_pipe( ialu_reg_long );
9124 %}
9125 
9126 // Shift Right arithmetic Long by 32-63
9127 instruct sarL_eReg_32_63( eRegL dst, immI_32_63 cnt, eFlagsReg cr) %{
9128   match(Set dst (RShiftL dst cnt));
9129   effect(KILL cr);
9130   ins_cost(300);
9131   format %{ "MOV    $dst.lo,$dst.hi\n"
9132           "\tSAR    $dst.lo,$cnt-32\n"
9133           "\tSAR    $dst.hi,31" %}
9134   opcode(0xC1, 0x7);  /* C1 /7 ib */
9135   ins_encode( move_long_big_shift_sign(dst,cnt) );
9136   ins_pipe( ialu_reg_long );
9137 %}
9138 
9139 // Shift Right arithmetic Long by variable
9140 instruct sarL_eReg_CL(eRegL dst, eCXRegI shift, eFlagsReg cr) %{
9141   match(Set dst (RShiftL dst shift));
9142   effect(KILL cr);
9143   ins_cost(600);
9144   size(18);
9145   format %{ "TEST   $shift,32\n\t"
9146             "JEQ,s  small\n\t"
9147             "MOV    $dst.lo,$dst.hi\n\t"
9148             "SAR    $dst.hi,31\n"
9149     "small:\tSHRD   $dst.lo,$dst.hi,$shift\n\t"
9150             "SAR    $dst.hi,$shift" %}
9151   ins_encode( shift_right_arith_long( dst, shift ) );
9152   ins_pipe( pipe_slow );
9153 %}
9154 
9155 
9156 //----------Double Instructions------------------------------------------------
9157 // Double Math
9158 
9159 // Compare & branch
9160 
9161 // P6 version of float compare, sets condition codes in EFLAGS
9162 instruct cmpDPR_cc_P6(eFlagsRegU cr, regDPR src1, regDPR src2, eAXRegI rax) %{
9163   predicate(VM_Version::supports_cmov() && UseSSE <=1);
9164   match(Set cr (CmpD src1 src2));
9165   effect(KILL rax);
9166   ins_cost(150);
9167   format %{ "FLD    $src1\n\t"
9168             "FUCOMIP ST,$src2  // P6 instruction\n\t"
9169             "JNP    exit\n\t"
9170             "MOV    ah,1       // saw a NaN, set CF\n\t"
9171             "SAHF\n"
9172      "exit:\tNOP               // avoid branch to branch" %}
9173   opcode(0xDF, 0x05); /* DF E8+i or DF /5 */
9174   ins_encode( Push_Reg_DPR(src1),
9175               OpcP, RegOpc(src2),
9176               cmpF_P6_fixup );
9177   ins_pipe( pipe_slow );
9178 %}
9179 
9180 instruct cmpDPR_cc_P6CF(eFlagsRegUCF cr, regDPR src1, regDPR src2) %{
9181   predicate(VM_Version::supports_cmov() && UseSSE <=1);
9182   match(Set cr (CmpD src1 src2));
9183   ins_cost(150);
9184   format %{ "FLD    $src1\n\t"
9185             "FUCOMIP ST,$src2  // P6 instruction" %}
9186   opcode(0xDF, 0x05); /* DF E8+i or DF /5 */
9187   ins_encode( Push_Reg_DPR(src1),
9188               OpcP, RegOpc(src2));
9189   ins_pipe( pipe_slow );
9190 %}
9191 
9192 // Compare & branch
9193 instruct cmpDPR_cc(eFlagsRegU cr, regDPR src1, regDPR src2, eAXRegI rax) %{
9194   predicate(UseSSE<=1);
9195   match(Set cr (CmpD src1 src2));
9196   effect(KILL rax);
9197   ins_cost(200);
9198   format %{ "FLD    $src1\n\t"
9199             "FCOMp  $src2\n\t"
9200             "FNSTSW AX\n\t"
9201             "TEST   AX,0x400\n\t"
9202             "JZ,s   flags\n\t"
9203             "MOV    AH,1\t# unordered treat as LT\n"
9204     "flags:\tSAHF" %}
9205   opcode(0xD8, 0x3); /* D8 D8+i or D8 /3 */
9206   ins_encode( Push_Reg_DPR(src1),
9207               OpcP, RegOpc(src2),
9208               fpu_flags);
9209   ins_pipe( pipe_slow );
9210 %}
9211 
9212 // Compare vs zero into -1,0,1
9213 instruct cmpDPR_0(rRegI dst, regDPR src1, immDPR0 zero, eAXRegI rax, eFlagsReg cr) %{
9214   predicate(UseSSE<=1);
9215   match(Set dst (CmpD3 src1 zero));
9216   effect(KILL cr, KILL rax);
9217   ins_cost(280);
9218   format %{ "FTSTD  $dst,$src1" %}
9219   opcode(0xE4, 0xD9);
9220   ins_encode( Push_Reg_DPR(src1),
9221               OpcS, OpcP, PopFPU,
9222               CmpF_Result(dst));
9223   ins_pipe( pipe_slow );
9224 %}
9225 
9226 // Compare into -1,0,1
9227 instruct cmpDPR_reg(rRegI dst, regDPR src1, regDPR src2, eAXRegI rax, eFlagsReg cr) %{
9228   predicate(UseSSE<=1);
9229   match(Set dst (CmpD3 src1 src2));
9230   effect(KILL cr, KILL rax);
9231   ins_cost(300);
9232   format %{ "FCMPD  $dst,$src1,$src2" %}
9233   opcode(0xD8, 0x3); /* D8 D8+i or D8 /3 */
9234   ins_encode( Push_Reg_DPR(src1),
9235               OpcP, RegOpc(src2),
9236               CmpF_Result(dst));
9237   ins_pipe( pipe_slow );
9238 %}
9239 
9240 // float compare and set condition codes in EFLAGS by XMM regs
9241 instruct cmpD_cc(eFlagsRegU cr, regD src1, regD src2) %{
9242   predicate(UseSSE>=2);
9243   match(Set cr (CmpD src1 src2));
9244   ins_cost(145);
9245   format %{ "UCOMISD $src1,$src2\n\t"
9246             "JNP,s   exit\n\t"
9247             "PUSHF\t# saw NaN, set CF\n\t"
9248             "AND     [rsp], #0xffffff2b\n\t"
9249             "POPF\n"
9250     "exit:" %}
9251   ins_encode %{
9252     __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
9253     emit_cmpfp_fixup(_masm);
9254   %}
9255   ins_pipe( pipe_slow );
9256 %}
9257 
9258 instruct cmpD_ccCF(eFlagsRegUCF cr, regD src1, regD src2) %{
9259   predicate(UseSSE>=2);
9260   match(Set cr (CmpD src1 src2));
9261   ins_cost(100);
9262   format %{ "UCOMISD $src1,$src2" %}
9263   ins_encode %{
9264     __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
9265   %}
9266   ins_pipe( pipe_slow );
9267 %}
9268 
9269 // float compare and set condition codes in EFLAGS by XMM regs
9270 instruct cmpD_ccmem(eFlagsRegU cr, regD src1, memory src2) %{
9271   predicate(UseSSE>=2);
9272   match(Set cr (CmpD src1 (LoadD src2)));
9273   ins_cost(145);
9274   format %{ "UCOMISD $src1,$src2\n\t"
9275             "JNP,s   exit\n\t"
9276             "PUSHF\t# saw NaN, set CF\n\t"
9277             "AND     [rsp], #0xffffff2b\n\t"
9278             "POPF\n"
9279     "exit:" %}
9280   ins_encode %{
9281     __ ucomisd($src1$$XMMRegister, $src2$$Address);
9282     emit_cmpfp_fixup(_masm);
9283   %}
9284   ins_pipe( pipe_slow );
9285 %}
9286 
9287 instruct cmpD_ccmemCF(eFlagsRegUCF cr, regD src1, memory src2) %{
9288   predicate(UseSSE>=2);
9289   match(Set cr (CmpD src1 (LoadD src2)));
9290   ins_cost(100);
9291   format %{ "UCOMISD $src1,$src2" %}
9292   ins_encode %{
9293     __ ucomisd($src1$$XMMRegister, $src2$$Address);
9294   %}
9295   ins_pipe( pipe_slow );
9296 %}
9297 
9298 // Compare into -1,0,1 in XMM
9299 instruct cmpD_reg(xRegI dst, regD src1, regD src2, eFlagsReg cr) %{
9300   predicate(UseSSE>=2);
9301   match(Set dst (CmpD3 src1 src2));
9302   effect(KILL cr);
9303   ins_cost(255);
9304   format %{ "UCOMISD $src1, $src2\n\t"
9305             "MOV     $dst, #-1\n\t"
9306             "JP,s    done\n\t"
9307             "JB,s    done\n\t"
9308             "SETNE   $dst\n\t"
9309             "MOVZB   $dst, $dst\n"
9310     "done:" %}
9311   ins_encode %{
9312     __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
9313     emit_cmpfp3(_masm, $dst$$Register);
9314   %}
9315   ins_pipe( pipe_slow );
9316 %}
9317 
9318 // Compare into -1,0,1 in XMM and memory
9319 instruct cmpD_regmem(xRegI dst, regD src1, memory src2, eFlagsReg cr) %{
9320   predicate(UseSSE>=2);
9321   match(Set dst (CmpD3 src1 (LoadD src2)));
9322   effect(KILL cr);
9323   ins_cost(275);
9324   format %{ "UCOMISD $src1, $src2\n\t"
9325             "MOV     $dst, #-1\n\t"
9326             "JP,s    done\n\t"
9327             "JB,s    done\n\t"
9328             "SETNE   $dst\n\t"
9329             "MOVZB   $dst, $dst\n"
9330     "done:" %}
9331   ins_encode %{
9332     __ ucomisd($src1$$XMMRegister, $src2$$Address);
9333     emit_cmpfp3(_masm, $dst$$Register);
9334   %}
9335   ins_pipe( pipe_slow );
9336 %}
9337 
9338 
9339 instruct subDPR_reg(regDPR dst, regDPR src) %{
9340   predicate (UseSSE <=1);
9341   match(Set dst (SubD dst src));
9342 
9343   format %{ "FLD    $src\n\t"
9344             "DSUBp  $dst,ST" %}
9345   opcode(0xDE, 0x5); /* DE E8+i  or DE /5 */
9346   ins_cost(150);
9347   ins_encode( Push_Reg_DPR(src),
9348               OpcP, RegOpc(dst) );
9349   ins_pipe( fpu_reg_reg );
9350 %}
9351 
9352 instruct subDPR_reg_round(stackSlotD dst, regDPR src1, regDPR src2) %{
9353   predicate (UseSSE <=1);
9354   match(Set dst (RoundDouble (SubD src1 src2)));
9355   ins_cost(250);
9356 
9357   format %{ "FLD    $src2\n\t"
9358             "DSUB   ST,$src1\n\t"
9359             "FSTP_D $dst\t# D-round" %}
9360   opcode(0xD8, 0x5);
9361   ins_encode( Push_Reg_DPR(src2),
9362               OpcP, RegOpc(src1), Pop_Mem_DPR(dst) );
9363   ins_pipe( fpu_mem_reg_reg );
9364 %}
9365 
9366 
9367 instruct subDPR_reg_mem(regDPR dst, memory src) %{
9368   predicate (UseSSE <=1);
9369   match(Set dst (SubD dst (LoadD src)));
9370   ins_cost(150);
9371 
9372   format %{ "FLD    $src\n\t"
9373             "DSUBp  $dst,ST" %}
9374   opcode(0xDE, 0x5, 0xDD); /* DE E8+i or DE /5 */  /* LoadD  DD /0 */
9375   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
9376               OpcP, RegOpc(dst) );
9377   ins_pipe( fpu_reg_mem );
9378 %}
9379 
9380 instruct absDPR_reg(regDPR1 dst, regDPR1 src) %{
9381   predicate (UseSSE<=1);
9382   match(Set dst (AbsD src));
9383   ins_cost(100);
9384   format %{ "FABS" %}
9385   opcode(0xE1, 0xD9);
9386   ins_encode( OpcS, OpcP );
9387   ins_pipe( fpu_reg_reg );
9388 %}
9389 
9390 instruct negDPR_reg(regDPR1 dst, regDPR1 src) %{
9391   predicate(UseSSE<=1);
9392   match(Set dst (NegD src));
9393   ins_cost(100);
9394   format %{ "FCHS" %}
9395   opcode(0xE0, 0xD9);
9396   ins_encode( OpcS, OpcP );
9397   ins_pipe( fpu_reg_reg );
9398 %}
9399 
9400 instruct addDPR_reg(regDPR dst, regDPR src) %{
9401   predicate(UseSSE<=1);
9402   match(Set dst (AddD dst src));
9403   format %{ "FLD    $src\n\t"
9404             "DADD   $dst,ST" %}
9405   size(4);
9406   ins_cost(150);
9407   opcode(0xDE, 0x0); /* DE C0+i or DE /0*/
9408   ins_encode( Push_Reg_DPR(src),
9409               OpcP, RegOpc(dst) );
9410   ins_pipe( fpu_reg_reg );
9411 %}
9412 
9413 
9414 instruct addDPR_reg_round(stackSlotD dst, regDPR src1, regDPR src2) %{
9415   predicate(UseSSE<=1);
9416   match(Set dst (RoundDouble (AddD src1 src2)));
9417   ins_cost(250);
9418 
9419   format %{ "FLD    $src2\n\t"
9420             "DADD   ST,$src1\n\t"
9421             "FSTP_D $dst\t# D-round" %}
9422   opcode(0xD8, 0x0); /* D8 C0+i or D8 /0*/
9423   ins_encode( Push_Reg_DPR(src2),
9424               OpcP, RegOpc(src1), Pop_Mem_DPR(dst) );
9425   ins_pipe( fpu_mem_reg_reg );
9426 %}
9427 
9428 
9429 instruct addDPR_reg_mem(regDPR dst, memory src) %{
9430   predicate(UseSSE<=1);
9431   match(Set dst (AddD dst (LoadD src)));
9432   ins_cost(150);
9433 
9434   format %{ "FLD    $src\n\t"
9435             "DADDp  $dst,ST" %}
9436   opcode(0xDE, 0x0, 0xDD); /* DE C0+i */  /* LoadD  DD /0 */
9437   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
9438               OpcP, RegOpc(dst) );
9439   ins_pipe( fpu_reg_mem );
9440 %}
9441 
9442 // add-to-memory
9443 instruct addDPR_mem_reg(memory dst, regDPR src) %{
9444   predicate(UseSSE<=1);
9445   match(Set dst (StoreD dst (RoundDouble (AddD (LoadD dst) src))));
9446   ins_cost(150);
9447 
9448   format %{ "FLD_D  $dst\n\t"
9449             "DADD   ST,$src\n\t"
9450             "FST_D  $dst" %}
9451   opcode(0xDD, 0x0);
9452   ins_encode( Opcode(0xDD), RMopc_Mem(0x00,dst),
9453               Opcode(0xD8), RegOpc(src),
9454               set_instruction_start,
9455               Opcode(0xDD), RMopc_Mem(0x03,dst) );
9456   ins_pipe( fpu_reg_mem );
9457 %}
9458 
9459 instruct addDPR_reg_imm1(regDPR dst, immDPR1 con) %{
9460   predicate(UseSSE<=1);
9461   match(Set dst (AddD dst con));
9462   ins_cost(125);
9463   format %{ "FLD1\n\t"
9464             "DADDp  $dst,ST" %}
9465   ins_encode %{
9466     __ fld1();
9467     __ faddp($dst$$reg);
9468   %}
9469   ins_pipe(fpu_reg);
9470 %}
9471 
9472 instruct addDPR_reg_imm(regDPR dst, immDPR con) %{
9473   predicate(UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 );
9474   match(Set dst (AddD dst con));
9475   ins_cost(200);
9476   format %{ "FLD_D  [$constantaddress]\t# load from constant table: double=$con\n\t"
9477             "DADDp  $dst,ST" %}
9478   ins_encode %{
9479     __ fld_d($constantaddress($con));
9480     __ faddp($dst$$reg);
9481   %}
9482   ins_pipe(fpu_reg_mem);
9483 %}
9484 
9485 instruct addDPR_reg_imm_round(stackSlotD dst, regDPR src, immDPR con) %{
9486   predicate(UseSSE<=1 && _kids[0]->_kids[1]->_leaf->getd() != 0.0 && _kids[0]->_kids[1]->_leaf->getd() != 1.0 );
9487   match(Set dst (RoundDouble (AddD src con)));
9488   ins_cost(200);
9489   format %{ "FLD_D  [$constantaddress]\t# load from constant table: double=$con\n\t"
9490             "DADD   ST,$src\n\t"
9491             "FSTP_D $dst\t# D-round" %}
9492   ins_encode %{
9493     __ fld_d($constantaddress($con));
9494     __ fadd($src$$reg);
9495     __ fstp_d(Address(rsp, $dst$$disp));
9496   %}
9497   ins_pipe(fpu_mem_reg_con);
9498 %}
9499 
9500 instruct mulDPR_reg(regDPR dst, regDPR src) %{
9501   predicate(UseSSE<=1);
9502   match(Set dst (MulD dst src));
9503   format %{ "FLD    $src\n\t"
9504             "DMULp  $dst,ST" %}
9505   opcode(0xDE, 0x1); /* DE C8+i or DE /1*/
9506   ins_cost(150);
9507   ins_encode( Push_Reg_DPR(src),
9508               OpcP, RegOpc(dst) );
9509   ins_pipe( fpu_reg_reg );
9510 %}
9511 
9512 // Strict FP instruction biases argument before multiply then
9513 // biases result to avoid double rounding of subnormals.
9514 //
9515 // scale arg1 by multiplying arg1 by 2^(-15360)
9516 // load arg2
9517 // multiply scaled arg1 by arg2
9518 // rescale product by 2^(15360)
9519 //
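// (For reference: 15360 = 16383 - 1023, the difference between the x87
// extended-precision exponent bias and the IEEE-754 double exponent bias; the
// pre-scaling roughly ensures that results which would be double subnormals
// are rounded once at the right precision before the final 2^15360 rescale.)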
9520 instruct strictfp_mulDPR_reg(regDPR1 dst, regnotDPR1 src) %{
9521   predicate( UseSSE<=1 && Compile::current()->has_method() && Compile::current()->method()->is_strict() );
9522   match(Set dst (MulD dst src));
9523   ins_cost(1);   // Select this instruction for all strict FP double multiplies
9524 
9525   format %{ "FLD    StubRoutines::_fpu_subnormal_bias1\n\t"
9526             "DMULp  $dst,ST\n\t"
9527             "FLD    $src\n\t"
9528             "DMULp  $dst,ST\n\t"
9529             "FLD    StubRoutines::_fpu_subnormal_bias2\n\t"
9530             "DMULp  $dst,ST\n\t" %}
9531   opcode(0xDE, 0x1); /* DE C8+i or DE /1*/
9532   ins_encode( strictfp_bias1(dst),
9533               Push_Reg_DPR(src),
9534               OpcP, RegOpc(dst),
9535               strictfp_bias2(dst) );
9536   ins_pipe( fpu_reg_reg );
9537 %}
9538 
9539 instruct mulDPR_reg_imm(regDPR dst, immDPR con) %{
9540   predicate( UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 );
9541   match(Set dst (MulD dst con));
9542   ins_cost(200);
9543   format %{ "FLD_D  [$constantaddress]\t# load from constant table: double=$con\n\t"
9544             "DMULp  $dst,ST" %}
9545   ins_encode %{
9546     __ fld_d($constantaddress($con));
9547     __ fmulp($dst$$reg);
9548   %}
9549   ins_pipe(fpu_reg_mem);
9550 %}
9551 
9552 
9553 instruct mulDPR_reg_mem(regDPR dst, memory src) %{
9554   predicate( UseSSE<=1 );
9555   match(Set dst (MulD dst (LoadD src)));
9556   ins_cost(200);
9557   format %{ "FLD_D  $src\n\t"
9558             "DMULp  $dst,ST" %}
9559   opcode(0xDE, 0x1, 0xDD); /* DE C8+i or DE /1*/  /* LoadD  DD /0 */
9560   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
9561               OpcP, RegOpc(dst) );
9562   ins_pipe( fpu_reg_mem );
9563 %}
9564 
9565 //
9566 // Cisc-alternate to reg-reg multiply
9567 instruct mulDPR_reg_mem_cisc(regDPR dst, regDPR src, memory mem) %{
9568   predicate( UseSSE<=1 );
9569   match(Set dst (MulD src (LoadD mem)));
9570   ins_cost(250);
9571   format %{ "FLD_D  $mem\n\t"
9572             "DMUL   ST,$src\n\t"
9573             "FSTP_D $dst" %}
9574   opcode(0xD8, 0x1, 0xD9); /* D8 C8+i */  /* LoadD D9 /0 */
9575   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,mem),
9576               OpcReg_FPR(src),
9577               Pop_Reg_DPR(dst) );
9578   ins_pipe( fpu_reg_reg_mem );
9579 %}
9580 
9581 
9582 // MACRO3 -- addDPR a mulDPR
9583 // This instruction is a '2-address' instruction in that the result goes
9584 // back to src2.  This eliminates a move from the macro; possibly the
9585 // register allocator will have to add it back (and maybe not).
9586 instruct addDPR_mulDPR_reg(regDPR src2, regDPR src1, regDPR src0) %{
9587   predicate( UseSSE<=1 );
9588   match(Set src2 (AddD (MulD src0 src1) src2));
9589   format %{ "FLD    $src0\t# ===MACRO3d===\n\t"
9590             "DMUL   ST,$src1\n\t"
9591             "DADDp  $src2,ST" %}
9592   ins_cost(250);
9593   opcode(0xDD); /* LoadD DD /0 */
9594   ins_encode( Push_Reg_FPR(src0),
9595               FMul_ST_reg(src1),
9596               FAddP_reg_ST(src2) );
9597   ins_pipe( fpu_reg_reg_reg );
9598 %}
9599 
9600 
9601 // MACRO3 -- subDPR a mulDPR
9602 instruct subDPR_mulDPR_reg(regDPR src2, regDPR src1, regDPR src0) %{
9603   predicate( UseSSE<=1 );
9604   match(Set src2 (SubD (MulD src0 src1) src2));
9605   format %{ "FLD    $src0\t# ===MACRO3d===\n\t"
9606             "DMUL   ST,$src1\n\t"
9607             "DSUBRp $src2,ST" %}
9608   ins_cost(250);
9609   ins_encode( Push_Reg_FPR(src0),
9610               FMul_ST_reg(src1),
9611               Opcode(0xDE), Opc_plus(0xE0,src2));
9612   ins_pipe( fpu_reg_reg_reg );
9613 %}
9614 
9615 
9616 instruct divDPR_reg(regDPR dst, regDPR src) %{
9617   predicate( UseSSE<=1 );
9618   match(Set dst (DivD dst src));
9619 
9620   format %{ "FLD    $src\n\t"
9621             "FDIVp  $dst,ST" %}
9622   opcode(0xDE, 0x7); /* DE F8+i or DE /7*/
9623   ins_cost(150);
9624   ins_encode( Push_Reg_DPR(src),
9625               OpcP, RegOpc(dst) );
9626   ins_pipe( fpu_reg_reg );
9627 %}
9628 
9629 // Strict FP instruction biases the dividend before the division, then
9630 // rescales the quotient, to avoid double rounding of subnormals.
9631 //
9632 // scale dividend by multiplying dividend by 2^(-15360)
9633 // load divisor
9634 // divide scaled dividend by divisor
9635 // rescale quotient by 2^(15360)
9636 //
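// Sketch of the effective computation, with the same scale factors as the
// strict multiply above:
//
//   quotient = ((dividend * 2^-15360) / divisor) * 2^+15360
//
// Again the powers of two cancel; they only ensure that a would-be subnormal
// quotient is rounded once rather than twice.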
9637 instruct strictfp_divDPR_reg(regDPR1 dst, regnotDPR1 src) %{
9638   predicate( UseSSE<=1 && Compile::current()->has_method() && Compile::current()->method()->is_strict() );
9639   match(Set dst (DivD dst src));
9640   ins_cost(1);   // Select this instruction for all strict FP double divides
9642 
9643   format %{ "FLD    StubRoutines::_fpu_subnormal_bias1\n\t"
9644             "DMULp  $dst,ST\n\t"
9645             "FLD    $src\n\t"
9646             "FDIVp  $dst,ST\n\t"
9647             "FLD    StubRoutines::_fpu_subnormal_bias2\n\t"
9648             "DMULp  $dst,ST\n\t" %}
9649   opcode(0xDE, 0x7); /* DE F8+i or DE /7*/
9650   ins_encode( strictfp_bias1(dst),
9651               Push_Reg_DPR(src),
9652               OpcP, RegOpc(dst),
9653               strictfp_bias2(dst) );
9654   ins_pipe( fpu_reg_reg );
9655 %}
9656 
9657 instruct divDPR_reg_round(stackSlotD dst, regDPR src1, regDPR src2) %{
9658   predicate( UseSSE<=1 && !(Compile::current()->has_method() && Compile::current()->method()->is_strict()) );
9659   match(Set dst (RoundDouble (DivD src1 src2)));
9660 
9661   format %{ "FLD    $src1\n\t"
9662             "FDIV   ST,$src2\n\t"
9663             "FSTP_D $dst\t# D-round" %}
9664   opcode(0xD8, 0x6); /* D8 F0+i or D8 /6 */
9665   ins_encode( Push_Reg_DPR(src1),
9666               OpcP, RegOpc(src2), Pop_Mem_DPR(dst) );
9667   ins_pipe( fpu_mem_reg_reg );
9668 %}
9669 
9670 
9671 instruct modDPR_reg(regDPR dst, regDPR src, eAXRegI rax, eFlagsReg cr) %{
9672   predicate(UseSSE<=1);
9673   match(Set dst (ModD dst src));
9674   effect(KILL rax, KILL cr); // emitModDPR() uses EAX and EFLAGS
9675 
9676   format %{ "DMOD   $dst,$src" %}
9677   ins_cost(250);
9678   ins_encode(Push_Reg_Mod_DPR(dst, src),
9679               emitModDPR(),
9680               Push_Result_Mod_DPR(src),
9681               Pop_Reg_DPR(dst));
9682   ins_pipe( pipe_slow );
9683 %}
9684 
9685 instruct modD_reg(regD dst, regD src0, regD src1, eAXRegI rax, eFlagsReg cr) %{
9686   predicate(UseSSE>=2);
9687   match(Set dst (ModD src0 src1));
9688   effect(KILL rax, KILL cr);
9689 
9690   format %{ "SUB    ESP,8\t # DMOD\n"
9691           "\tMOVSD  [ESP+0],$src1\n"
9692           "\tFLD_D  [ESP+0]\n"
9693           "\tMOVSD  [ESP+0],$src0\n"
9694           "\tFLD_D  [ESP+0]\n"
9695      "loop:\tFPREM\n"
9696           "\tFWAIT\n"
9697           "\tFNSTSW AX\n"
9698           "\tSAHF\n"
9699           "\tJP     loop\n"
9700           "\tFSTP_D [ESP+0]\n"
9701           "\tMOVSD  $dst,[ESP+0]\n"
9702           "\tADD    ESP,8\n"
9703           "\tFSTP   ST0\t # Restore FPU Stack"
9704     %}
9705   ins_cost(250);
9706   ins_encode( Push_ModD_encoding(src0, src1), emitModDPR(), Push_ResultD(dst), PopFPU);
9707   ins_pipe( pipe_slow );
9708 %}
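// Note on the FPREM loop in the format above: FPREM may perform only a
// partial reduction and signals this by setting the C2 bit of the FPU status
// word.  FNSTSW AX / SAHF copies C2 into the parity flag, so the JP branch
// repeats FPREM until the remainder is complete.  The trailing FSTP ST0 pops
// the leftover divisor and restores the FPU stack depth.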
9709 
9710 instruct sinDPR_reg(regDPR1 dst, regDPR1 src) %{
9711   predicate (UseSSE<=1);
9712   match(Set dst (SinD src));
9713   ins_cost(1800);
9714   format %{ "DSIN   $dst" %}
9715   opcode(0xD9, 0xFE);
9716   ins_encode( OpcP, OpcS );
9717   ins_pipe( pipe_slow );
9718 %}
9719 
9720 instruct sinD_reg(regD dst, eFlagsReg cr) %{
9721   predicate (UseSSE>=2);
9722   match(Set dst (SinD dst));
9723   effect(KILL cr); // Push_{Src|Result}D() uses "{SUB|ADD} ESP,8"
9724   ins_cost(1800);
9725   format %{ "DSIN   $dst" %}
9726   opcode(0xD9, 0xFE);
9727   ins_encode( Push_SrcD(dst), OpcP, OpcS, Push_ResultD(dst) );
9728   ins_pipe( pipe_slow );
9729 %}
9730 
9731 instruct cosDPR_reg(regDPR1 dst, regDPR1 src) %{
9732   predicate (UseSSE<=1);
9733   match(Set dst (CosD src));
9734   ins_cost(1800);
9735   format %{ "DCOS   $dst" %}
9736   opcode(0xD9, 0xFF);
9737   ins_encode( OpcP, OpcS );
9738   ins_pipe( pipe_slow );
9739 %}
9740 
9741 instruct cosD_reg(regD dst, eFlagsReg cr) %{
9742   predicate (UseSSE>=2);
9743   match(Set dst (CosD dst));
9744   effect(KILL cr); // Push_{Src|Result}D() uses "{SUB|ADD} ESP,8"
9745   ins_cost(1800);
9746   format %{ "DCOS   $dst" %}
9747   opcode(0xD9, 0xFF);
9748   ins_encode( Push_SrcD(dst), OpcP, OpcS, Push_ResultD(dst) );
9749   ins_pipe( pipe_slow );
9750 %}
9751 
9752 instruct tanDPR_reg(regDPR1 dst, regDPR1 src) %{
9753   predicate (UseSSE<=1);
9754   match(Set dst(TanD src));
9755   format %{ "DTAN   $dst" %}
9756   ins_encode( Opcode(0xD9), Opcode(0xF2),    // fptan
9757               Opcode(0xDD), Opcode(0xD8));   // fstp st
9758   ins_pipe( pipe_slow );
9759 %}
9760 
9761 instruct tanD_reg(regD dst, eFlagsReg cr) %{
9762   predicate (UseSSE>=2);
9763   match(Set dst(TanD dst));
9764   effect(KILL cr); // Push_{Src|Result}D() uses "{SUB|ADD} ESP,8"
9765   format %{ "DTAN   $dst" %}
9766   ins_encode( Push_SrcD(dst),
9767               Opcode(0xD9), Opcode(0xF2),    // fptan
9768               Opcode(0xDD), Opcode(0xD8),   // fstp st
9769               Push_ResultD(dst) );
9770   ins_pipe( pipe_slow );
9771 %}
9772 
9773 instruct atanDPR_reg(regDPR dst, regDPR src) %{
9774   predicate (UseSSE<=1);
9775   match(Set dst(AtanD dst src));
9776   format %{ "DATAN  $dst,$src" %}
9777   opcode(0xD9, 0xF3);
9778   ins_encode( Push_Reg_DPR(src),
9779               OpcP, OpcS, RegOpc(dst) );
9780   ins_pipe( pipe_slow );
9781 %}
9782 
9783 instruct atanD_reg(regD dst, regD src, eFlagsReg cr) %{
9784   predicate (UseSSE>=2);
9785   match(Set dst(AtanD dst src));
9786   effect(KILL cr); // Push_{Src|Result}D() uses "{SUB|ADD} ESP,8"
9787   format %{ "DATAN  $dst,$src" %}
9788   opcode(0xD9, 0xF3);
9789   ins_encode( Push_SrcD(src),
9790               OpcP, OpcS, Push_ResultD(dst) );
9791   ins_pipe( pipe_slow );
9792 %}
9793 
9794 instruct sqrtDPR_reg(regDPR dst, regDPR src) %{
9795   predicate (UseSSE<=1);
9796   match(Set dst (SqrtD src));
9797   format %{ "DSQRT  $dst,$src" %}
9798   opcode(0xFA, 0xD9);
9799   ins_encode( Push_Reg_DPR(src),
9800               OpcS, OpcP, Pop_Reg_DPR(dst) );
9801   ins_pipe( pipe_slow );
9802 %}
9803 
9804 instruct powDPR_reg(regDPR X, regDPR1 Y, eAXRegI rax, eDXRegI rdx, eCXRegI rcx, eFlagsReg cr) %{
9805   predicate (UseSSE<=1);
9806   match(Set Y (PowD X Y));  // Raise X to the Yth power
9807   effect(KILL rax, KILL rdx, KILL rcx, KILL cr);
9808   format %{ "fast_pow $X $Y -> $Y  // KILL $rax, $rcx, $rdx" %}
9809   ins_encode %{
9810     __ subptr(rsp, 8);
9811     __ fld_s($X$$reg - 1);
9812     __ fast_pow();
9813     __ addptr(rsp, 8);
9814   %}
9815   ins_pipe( pipe_slow );
9816 %}
9817 
9818 instruct powD_reg(regD dst, regD src0, regD src1, eAXRegI rax, eDXRegI rdx, eCXRegI rcx, eFlagsReg cr) %{
9819   predicate (UseSSE>=2);
9820   match(Set dst (PowD src0 src1));  // Raise src0 to the src1'th power
9821   effect(KILL rax, KILL rdx, KILL rcx, KILL cr);
9822   format %{ "fast_pow $src0 $src1 -> $dst  // KILL $rax, $rcx, $rdx" %}
9823   ins_encode %{
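    // fast_pow() works on the x87 stack, so both XMM operands are staged
    // through an 8-byte scratch slot: src1 is loaded first, then src0
    // (leaving src0 in ST(0)); the x87 result is stored back into the slot
    // and copied into dst.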
9824     __ subptr(rsp, 8);
9825     __ movdbl(Address(rsp, 0), $src1$$XMMRegister);
9826     __ fld_d(Address(rsp, 0));
9827     __ movdbl(Address(rsp, 0), $src0$$XMMRegister);
9828     __ fld_d(Address(rsp, 0));
9829     __ fast_pow();
9830     __ fstp_d(Address(rsp, 0));
9831     __ movdbl($dst$$XMMRegister, Address(rsp, 0));
9832     __ addptr(rsp, 8);
9833   %}
9834   ins_pipe( pipe_slow );
9835 %}
9836 
9837 
9838 instruct expDPR_reg(regDPR1 dpr1, eAXRegI rax, eDXRegI rdx, eCXRegI rcx, eFlagsReg cr) %{
9839   predicate (UseSSE<=1);
9840   match(Set dpr1 (ExpD dpr1));
9841   effect(KILL rax, KILL rcx, KILL rdx, KILL cr);
9842   format %{ "fast_exp $dpr1 -> $dpr1  // KILL $rax, $rcx, $rdx" %}
9843   ins_encode %{
9844     __ fast_exp();
9845   %}
9846   ins_pipe( pipe_slow );
9847 %}
9848 
9849 instruct expD_reg(regD dst, regD src, eAXRegI rax, eDXRegI rdx, eCXRegI rcx, eFlagsReg cr) %{
9850   predicate (UseSSE>=2);
9851   match(Set dst (ExpD src));
9852   effect(KILL rax, KILL rcx, KILL rdx, KILL cr);
9853   format %{ "fast_exp $src -> $dst  // KILL $rax, $rcx, $rdx" %}
9854   ins_encode %{
9855     __ subptr(rsp, 8);
9856     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
9857     __ fld_d(Address(rsp, 0));
9858     __ fast_exp();
9859     __ fstp_d(Address(rsp, 0));
9860     __ movdbl($dst$$XMMRegister, Address(rsp, 0));
9861     __ addptr(rsp, 8);
9862   %}
9863   ins_pipe( pipe_slow );
9864 %}
9865 
9866 instruct log10DPR_reg(regDPR1 dst, regDPR1 src) %{
9867   predicate (UseSSE<=1);
9868   // The source Double operand on FPU stack
9869   match(Set dst (Log10D src));
9870   // fldlg2       ; push log_10(2) on the FPU stack; full 80-bit number
9871   // fxch         ; swap ST(0) with ST(1)
9872   // fyl2x        ; compute log_10(2) * log_2(x)
9873   format %{ "FLDLG2 \t\t\t#Log10\n\t"
9874             "FXCH   \n\t"
9875             "FYL2X  \t\t\t# Q=Log10*Log_2(x)"
9876          %}
9877   ins_encode( Opcode(0xD9), Opcode(0xEC),   // fldlg2
9878               Opcode(0xD9), Opcode(0xC9),   // fxch
9879               Opcode(0xD9), Opcode(0xF1));  // fyl2x
9880 
9881   ins_pipe( pipe_slow );
9882 %}
9883 
9884 instruct log10D_reg(regD dst, regD src, eFlagsReg cr) %{
9885   predicate (UseSSE>=2);
9886   effect(KILL cr);
9887   match(Set dst (Log10D src));
9888   // fldlg2       ; push log_10(2) on the FPU stack; full 80-bit number
9889   // fyl2x        ; compute log_10(2) * log_2(x)
9890   format %{ "FLDLG2 \t\t\t#Log10\n\t"
9891             "FYL2X  \t\t\t# Q=Log10*Log_2(x)"
9892          %}
9893   ins_encode( Opcode(0xD9), Opcode(0xEC),   // fldlg2
9894               Push_SrcD(src),
9895               Opcode(0xD9), Opcode(0xF1),   // fyl2x
9896               Push_ResultD(dst));
9897 
9898   ins_pipe( pipe_slow );
9899 %}
9900 
9901 instruct logDPR_reg(regDPR1 dst, regDPR1 src) %{
9902   predicate (UseSSE<=1);
9903   // The source Double operand on FPU stack
9904   match(Set dst (LogD src));
9905   // fldln2       ; push log_e(2) on the FPU stack; full 80-bit number
9906   // fxch         ; swap ST(0) with ST(1)
9907   // fyl2x        ; compute log_e(2) * log_2(x)
9908   format %{ "FLDLN2 \t\t\t#Log_e\n\t"
9909             "FXCH   \n\t"
9910             "FYL2X  \t\t\t# Q=Log_e*Log_2(x)"
9911          %}
9912   ins_encode( Opcode(0xD9), Opcode(0xED),   // fldln2
9913               Opcode(0xD9), Opcode(0xC9),   // fxch
9914               Opcode(0xD9), Opcode(0xF1));  // fyl2x
9915 
9916   ins_pipe( pipe_slow );
9917 %}
9918 
9919 instruct logD_reg(regD dst, regD src, eFlagsReg cr) %{
9920   predicate (UseSSE>=2);
9921   effect(KILL cr);
9922   // The source and result Double operands in XMM registers
9923   match(Set dst (LogD src));
9924   // fldln2       ; push log_e(2) on the FPU stack; full 80-bit number
9925   // fyl2x        ; compute log_e(2) * log_2(x)
9926   format %{ "FLDLN2 \t\t\t#Log_e\n\t"
9927             "FYL2X  \t\t\t# Q=Log_e*Log_2(x)"
9928          %}
9929   ins_encode( Opcode(0xD9), Opcode(0xED),   // fldln2
9930               Push_SrcD(src),
9931               Opcode(0xD9), Opcode(0xF1),   // fyl2x
9932               Push_ResultD(dst));
9933   ins_pipe( pipe_slow );
9934 %}
9935 
9936 //-------------Float Instructions-------------------------------
9937 // Float Math
9938 
9939 // Code for float compare:
9940 //     fcompp();
9941 //     fwait(); fnstsw_ax();
9942 //     sahf();
9943 //     movl(dst, unordered_result);
9944 //     jcc(Assembler::parity, exit);
9945 //     movl(dst, less_result);
9946 //     jcc(Assembler::below, exit);
9947 //     movl(dst, equal_result);
9948 //     jcc(Assembler::equal, exit);
9949 //     movl(dst, greater_result);
9950 //   exit:
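// Note on the unordered case (not spelled out above): after fnstsw_ax() and
// sahf(), the C2 condition bit of the FPU status word lands in the parity
// flag, and C2 is set exactly when the compare was unordered, i.e. at least
// one operand was a NaN.  That is why the jcc(parity, exit) branch keeps the
// preloaded unordered_result.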
9951 
9952 // P6 version of float compare, sets condition codes in EFLAGS
9953 instruct cmpFPR_cc_P6(eFlagsRegU cr, regFPR src1, regFPR src2, eAXRegI rax) %{
9954   predicate(VM_Version::supports_cmov() && UseSSE == 0);
9955   match(Set cr (CmpF src1 src2));
9956   effect(KILL rax);
9957   ins_cost(150);
9958   format %{ "FLD    $src1\n\t"
9959             "FUCOMIP ST,$src2  // P6 instruction\n\t"
9960             "JNP    exit\n\t"
9961             "MOV    ah,1       // saw a NaN, set CF (treat as LT)\n\t"
9962             "SAHF\n"
9963      "exit:\tNOP               // avoid branch to branch" %}
9964   opcode(0xDF, 0x05); /* DF E8+i or DF /5 */
9965   ins_encode( Push_Reg_DPR(src1),
9966               OpcP, RegOpc(src2),
9967               cmpF_P6_fixup );
9968   ins_pipe( pipe_slow );
9969 %}
9970 
9971 instruct cmpFPR_cc_P6CF(eFlagsRegUCF cr, regFPR src1, regFPR src2) %{
9972   predicate(VM_Version::supports_cmov() && UseSSE == 0);
9973   match(Set cr (CmpF src1 src2));
9974   ins_cost(100);
9975   format %{ "FLD    $src1\n\t"
9976             "FUCOMIP ST,$src2  // P6 instruction" %}
9977   opcode(0xDF, 0x05); /* DF E8+i or DF /5 */
9978   ins_encode( Push_Reg_DPR(src1),
9979               OpcP, RegOpc(src2));
9980   ins_pipe( pipe_slow );
9981 %}
9982 
9983 
9984 // Compare & branch
9985 instruct cmpFPR_cc(eFlagsRegU cr, regFPR src1, regFPR src2, eAXRegI rax) %{
9986   predicate(UseSSE == 0);
9987   match(Set cr (CmpF src1 src2));
9988   effect(KILL rax);
9989   ins_cost(200);
9990   format %{ "FLD    $src1\n\t"
9991             "FCOMp  $src2\n\t"
9992             "FNSTSW AX\n\t"
9993             "TEST   AX,0x400\n\t"
9994             "JZ,s   flags\n\t"
9995             "MOV    AH,1\t# unordered treat as LT\n"
9996     "flags:\tSAHF" %}
9997   opcode(0xD8, 0x3); /* D8 D8+i or D8 /3 */
9998   ins_encode( Push_Reg_DPR(src1),
9999               OpcP, RegOpc(src2),
10000               fpu_flags);
10001   ins_pipe( pipe_slow );
10002 %}
10003 
10004 // Compare vs zero into -1,0,1
10005 instruct cmpFPR_0(rRegI dst, regFPR src1, immFPR0 zero, eAXRegI rax, eFlagsReg cr) %{
10006   predicate(UseSSE == 0);
10007   match(Set dst (CmpF3 src1 zero));
10008   effect(KILL cr, KILL rax);
10009   ins_cost(280);
10010   format %{ "FTSTF  $dst,$src1" %}
10011   opcode(0xE4, 0xD9);
10012   ins_encode( Push_Reg_DPR(src1),
10013               OpcS, OpcP, PopFPU,
10014               CmpF_Result(dst));
10015   ins_pipe( pipe_slow );
10016 %}
10017 
10018 // Compare into -1,0,1
10019 instruct cmpFPR_reg(rRegI dst, regFPR src1, regFPR src2, eAXRegI rax, eFlagsReg cr) %{
10020   predicate(UseSSE == 0);
10021   match(Set dst (CmpF3 src1 src2));
10022   effect(KILL cr, KILL rax);
10023   ins_cost(300);
10024   format %{ "FCMPF  $dst,$src1,$src2" %}
10025   opcode(0xD8, 0x3); /* D8 D8+i or D8 /3 */
10026   ins_encode( Push_Reg_DPR(src1),
10027               OpcP, RegOpc(src2),
10028               CmpF_Result(dst));
10029   ins_pipe( pipe_slow );
10030 %}
10031 
10032 // float compare and set condition codes in EFLAGS by XMM regs
10033 instruct cmpF_cc(eFlagsRegU cr, regF src1, regF src2) %{
10034   predicate(UseSSE>=1);
10035   match(Set cr (CmpF src1 src2));
10036   ins_cost(145);
10037   format %{ "UCOMISS $src1,$src2\n\t"
10038             "JNP,s   exit\n\t"
10039             "PUSHF\t# saw NaN, set CF\n\t"
10040             "AND     [rsp], #0xffffff2b\n\t"
10041             "POPF\n"
10042     "exit:" %}
10043   ins_encode %{
10044     __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
10045     emit_cmpfp_fixup(_masm);
10046   %}
10047   ins_pipe( pipe_slow );
10048 %}
10049 
10050 instruct cmpF_ccCF(eFlagsRegUCF cr, regF src1, regF src2) %{
10051   predicate(UseSSE>=1);
10052   match(Set cr (CmpF src1 src2));
10053   ins_cost(100);
10054   format %{ "UCOMISS $src1,$src2" %}
10055   ins_encode %{
10056     __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
10057   %}
10058   ins_pipe( pipe_slow );
10059 %}
10060 
10061 // float compare and set condition codes in EFLAGS by XMM regs
10062 instruct cmpF_ccmem(eFlagsRegU cr, regF src1, memory src2) %{
10063   predicate(UseSSE>=1);
10064   match(Set cr (CmpF src1 (LoadF src2)));
10065   ins_cost(165);
10066   format %{ "UCOMISS $src1,$src2\n\t"
10067             "JNP,s   exit\n\t"
10068             "PUSHF\t# saw NaN, set CF\n\t"
10069             "AND     [rsp], #0xffffff2b\n\t"
10070             "POPF\n"
10071     "exit:" %}
10072   ins_encode %{
10073     __ ucomiss($src1$$XMMRegister, $src2$$Address);
10074     emit_cmpfp_fixup(_masm);
10075   %}
10076   ins_pipe( pipe_slow );
10077 %}
10078 
10079 instruct cmpF_ccmemCF(eFlagsRegUCF cr, regF src1, memory src2) %{
10080   predicate(UseSSE>=1);
10081   match(Set cr (CmpF src1 (LoadF src2)));
10082   ins_cost(100);
10083   format %{ "UCOMISS $src1,$src2" %}
10084   ins_encode %{
10085     __ ucomiss($src1$$XMMRegister, $src2$$Address);
10086   %}
10087   ins_pipe( pipe_slow );
10088 %}
10089 
10090 // Compare into -1,0,1 in XMM
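// (How the -1/0/1 value is produced: UCOMISS leaves unordered as ZF=PF=CF=1,
// less-than as CF=1, equal as ZF=1, and greater-than with all three clear.
// The sequence below therefore keeps the preloaded -1 for the unordered and
// less-than cases, while SETNE/MOVZB turns the rest into 0 for equal and 1
// for greater.)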
10091 instruct cmpF_reg(xRegI dst, regF src1, regF src2, eFlagsReg cr) %{
10092   predicate(UseSSE>=1);
10093   match(Set dst (CmpF3 src1 src2));
10094   effect(KILL cr);
10095   ins_cost(255);
10096   format %{ "UCOMISS $src1, $src2\n\t"
10097             "MOV     $dst, #-1\n\t"
10098             "JP,s    done\n\t"
10099             "JB,s    done\n\t"
10100             "SETNE   $dst\n\t"
10101             "MOVZB   $dst, $dst\n"
10102     "done:" %}
10103   ins_encode %{
10104     __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
10105     emit_cmpfp3(_masm, $dst$$Register);
10106   %}
10107   ins_pipe( pipe_slow );
10108 %}
10109 
10110 // Compare into -1,0,1 in XMM and memory
10111 instruct cmpF_regmem(xRegI dst, regF src1, memory src2, eFlagsReg cr) %{
10112   predicate(UseSSE>=1);
10113   match(Set dst (CmpF3 src1 (LoadF src2)));
10114   effect(KILL cr);
10115   ins_cost(275);
10116   format %{ "UCOMISS $src1, $src2\n\t"
10117             "MOV     $dst, #-1\n\t"
10118             "JP,s    done\n\t"
10119             "JB,s    done\n\t"
10120             "SETNE   $dst\n\t"
10121             "MOVZB   $dst, $dst\n"
10122     "done:" %}
10123   ins_encode %{
10124     __ ucomiss($src1$$XMMRegister, $src2$$Address);
10125     emit_cmpfp3(_masm, $dst$$Register);
10126   %}
10127   ins_pipe( pipe_slow );
10128 %}
10129 
10130 // Spill to obtain 24-bit precision
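// (In these 24-bit forms the spill itself does the rounding: the FSTP_S store
// into the 32-bit stack slot narrows the x87 result to float precision.  The
// register-only variants below skip that store and therefore do not round.)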
10131 instruct subFPR24_reg(stackSlotF dst, regFPR src1, regFPR src2) %{
10132   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10133   match(Set dst (SubF src1 src2));
10134 
10135   format %{ "FSUB   $dst,$src1 - $src2" %}
10136   opcode(0xD8, 0x4); /* D8 E0+i or D8 /4 mod==0x3 ;; result in TOS */
10137   ins_encode( Push_Reg_FPR(src1),
10138               OpcReg_FPR(src2),
10139               Pop_Mem_FPR(dst) );
10140   ins_pipe( fpu_mem_reg_reg );
10141 %}
10142 //
10143 // This instruction does not round to 24-bits
10144 instruct subFPR_reg(regFPR dst, regFPR src) %{
10145   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10146   match(Set dst (SubF dst src));
10147 
10148   format %{ "FSUB   $dst,$src" %}
10149   opcode(0xDE, 0x5); /* DE E8+i  or DE /5 */
10150   ins_encode( Push_Reg_FPR(src),
10151               OpcP, RegOpc(dst) );
10152   ins_pipe( fpu_reg_reg );
10153 %}
10154 
10155 // Spill to obtain 24-bit precision
10156 instruct addFPR24_reg(stackSlotF dst, regFPR src1, regFPR src2) %{
10157   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10158   match(Set dst (AddF src1 src2));
10159 
10160   format %{ "FADD   $dst,$src1,$src2" %}
10161   opcode(0xD8, 0x0); /* D8 C0+i */
10162   ins_encode( Push_Reg_FPR(src2),
10163               OpcReg_FPR(src1),
10164               Pop_Mem_FPR(dst) );
10165   ins_pipe( fpu_mem_reg_reg );
10166 %}
10167 //
10168 // This instruction does not round to 24-bits
10169 instruct addFPR_reg(regFPR dst, regFPR src) %{
10170   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10171   match(Set dst (AddF dst src));
10172 
10173   format %{ "FLD    $src\n\t"
10174             "FADDp  $dst,ST" %}
10175   opcode(0xDE, 0x0); /* DE C0+i or DE /0*/
10176   ins_encode( Push_Reg_FPR(src),
10177               OpcP, RegOpc(dst) );
10178   ins_pipe( fpu_reg_reg );
10179 %}
10180 
10181 instruct absFPR_reg(regFPR1 dst, regFPR1 src) %{
10182   predicate(UseSSE==0);
10183   match(Set dst (AbsF src));
10184   ins_cost(100);
10185   format %{ "FABS" %}
10186   opcode(0xE1, 0xD9);
10187   ins_encode( OpcS, OpcP );
10188   ins_pipe( fpu_reg_reg );
10189 %}
10190 
10191 instruct negFPR_reg(regFPR1 dst, regFPR1 src) %{
10192   predicate(UseSSE==0);
10193   match(Set dst (NegF src));
10194   ins_cost(100);
10195   format %{ "FCHS" %}
10196   opcode(0xE0, 0xD9);
10197   ins_encode( OpcS, OpcP );
10198   ins_pipe( fpu_reg_reg );
10199 %}
10200 
10201 // Cisc-alternate to addFPR_reg
10202 // Spill to obtain 24-bit precision
10203 instruct addFPR24_reg_mem(stackSlotF dst, regFPR src1, memory src2) %{
10204   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10205   match(Set dst (AddF src1 (LoadF src2)));
10206 
10207   format %{ "FLD    $src2\n\t"
10208             "FADD   ST,$src1\n\t"
10209             "FSTP_S $dst" %}
10210   opcode(0xD8, 0x0, 0xD9); /* D8 C0+i */  /* LoadF  D9 /0 */
10211   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
10212               OpcReg_FPR(src1),
10213               Pop_Mem_FPR(dst) );
10214   ins_pipe( fpu_mem_reg_mem );
10215 %}
10216 //
10217 // Cisc-alternate to addFPR_reg
10218 // This instruction does not round to 24-bits
10219 instruct addFPR_reg_mem(regFPR dst, memory src) %{
10220   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10221   match(Set dst (AddF dst (LoadF src)));
10222 
10223   format %{ "FADD   $dst,$src" %}
10224   opcode(0xDE, 0x0, 0xD9); /* DE C0+i or DE /0*/  /* LoadF  D9 /0 */
10225   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
10226               OpcP, RegOpc(dst) );
10227   ins_pipe( fpu_reg_mem );
10228 %}
10229 
10230 // The following two instructions are for _222_mpegaudio
10231 // Spill to obtain 24-bit precision
10232 instruct addFPR24_mem_reg(stackSlotF dst, regFPR src2, memory src1 ) %{
10233   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10234   match(Set dst (AddF src1 src2));
10235 
10236   format %{ "FADD   $dst,$src1,$src2" %}
10237   opcode(0xD8, 0x0, 0xD9); /* D8 C0+i */  /* LoadF  D9 /0 */
10238   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src1),
10239               OpcReg_FPR(src2),
10240               Pop_Mem_FPR(dst) );
10241   ins_pipe( fpu_mem_reg_mem );
10242 %}
10243 
10244 // Cisc-spill variant
10245 // Spill to obtain 24-bit precision
10246 instruct addFPR24_mem_cisc(stackSlotF dst, memory src1, memory src2) %{
10247   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10248   match(Set dst (AddF src1 (LoadF src2)));
10249 
10250   format %{ "FADD   $dst,$src1,$src2 cisc" %}
10251   opcode(0xD8, 0x0, 0xD9); /* D8 C0+i */  /* LoadF  D9 /0 */
10252   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
10253               set_instruction_start,
10254               OpcP, RMopc_Mem(secondary,src1),
10255               Pop_Mem_FPR(dst) );
10256   ins_pipe( fpu_mem_mem_mem );
10257 %}
10258 
10259 // Spill to obtain 24-bit precision
10260 instruct addFPR24_mem_mem(stackSlotF dst, memory src1, memory src2) %{
10261   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10262   match(Set dst (AddF src1 src2));
10263 
10264   format %{ "FADD   $dst,$src1,$src2" %}
10265   opcode(0xD8, 0x0, 0xD9); /* D8 /0 */  /* LoadF  D9 /0 */
10266   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
10267               set_instruction_start,
10268               OpcP, RMopc_Mem(secondary,src1),
10269               Pop_Mem_FPR(dst) );
10270   ins_pipe( fpu_mem_mem_mem );
10271 %}
10272 
10273 
10274 // Spill to obtain 24-bit precision
10275 instruct addFPR24_reg_imm(stackSlotF dst, regFPR src, immFPR con) %{
10276   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10277   match(Set dst (AddF src con));
10278   format %{ "FLD    $src\n\t"
10279             "FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t"
10280             "FSTP_S $dst"  %}
10281   ins_encode %{
10282     __ fld_s($src$$reg - 1);  // FLD ST(i-1)
10283     __ fadd_s($constantaddress($con));
10284     __ fstp_s(Address(rsp, $dst$$disp));
10285   %}
10286   ins_pipe(fpu_mem_reg_con);
10287 %}
10288 //
10289 // This instruction does not round to 24-bits
10290 instruct addFPR_reg_imm(regFPR dst, regFPR src, immFPR con) %{
10291   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10292   match(Set dst (AddF src con));
10293   format %{ "FLD    $src\n\t"
10294             "FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t"
10295             "FSTP   $dst"  %}
10296   ins_encode %{
10297     __ fld_s($src$$reg - 1);  // FLD ST(i-1)
10298     __ fadd_s($constantaddress($con));
10299     __ fstp_d($dst$$reg);
10300   %}
10301   ins_pipe(fpu_reg_reg_con);
10302 %}
10303 
10304 // Spill to obtain 24-bit precision
10305 instruct mulFPR24_reg(stackSlotF dst, regFPR src1, regFPR src2) %{
10306   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10307   match(Set dst (MulF src1 src2));
10308 
10309   format %{ "FLD    $src1\n\t"
10310             "FMUL   $src2\n\t"
10311             "FSTP_S $dst"  %}
10312   opcode(0xD8, 0x1); /* D8 C8+i or D8 /1 ;; result in TOS */
10313   ins_encode( Push_Reg_FPR(src1),
10314               OpcReg_FPR(src2),
10315               Pop_Mem_FPR(dst) );
10316   ins_pipe( fpu_mem_reg_reg );
10317 %}
10318 //
10319 // This instruction does not round to 24-bits
10320 instruct mulFPR_reg(regFPR dst, regFPR src1, regFPR src2) %{
10321   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10322   match(Set dst (MulF src1 src2));
10323 
10324   format %{ "FLD    $src1\n\t"
10325             "FMUL   $src2\n\t"
10326             "FSTP_S $dst"  %}
10327   opcode(0xD8, 0x1); /* D8 C8+i */
10328   ins_encode( Push_Reg_FPR(src2),
10329               OpcReg_FPR(src1),
10330               Pop_Reg_FPR(dst) );
10331   ins_pipe( fpu_reg_reg_reg );
10332 %}
10333 
10334 
10335 // Spill to obtain 24-bit precision
10336 // Cisc-alternate to reg-reg multiply
10337 instruct mulFPR24_reg_mem(stackSlotF dst, regFPR src1, memory src2) %{
10338   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10339   match(Set dst (MulF src1 (LoadF src2)));
10340 
10341   format %{ "FLD_S  $src2\n\t"
10342             "FMUL   $src1\n\t"
10343             "FSTP_S $dst"  %}
10344   opcode(0xD8, 0x1, 0xD9); /* D8 C8+i or DE /1*/  /* LoadF D9 /0 */
10345   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
10346               OpcReg_FPR(src1),
10347               Pop_Mem_FPR(dst) );
10348   ins_pipe( fpu_mem_reg_mem );
10349 %}
10350 //
10351 // This instruction does not round to 24-bits
10352 // Cisc-alternate to reg-reg multiply
10353 instruct mulFPR_reg_mem(regFPR dst, regFPR src1, memory src2) %{
10354   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10355   match(Set dst (MulF src1 (LoadF src2)));
10356 
10357   format %{ "FMUL   $dst,$src1,$src2" %}
10358   opcode(0xD8, 0x1, 0xD9); /* D8 C8+i */  /* LoadF D9 /0 */
10359   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
10360               OpcReg_FPR(src1),
10361               Pop_Reg_FPR(dst) );
10362   ins_pipe( fpu_reg_reg_mem );
10363 %}
10364 
10365 // Spill to obtain 24-bit precision
10366 instruct mulFPR24_mem_mem(stackSlotF dst, memory src1, memory src2) %{
10367   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10368   match(Set dst (MulF src1 src2));
10369 
10370   format %{ "FMUL   $dst,$src1,$src2" %}
10371   opcode(0xD8, 0x1, 0xD9); /* D8 /1 */  /* LoadF D9 /0 */
10372   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
10373               set_instruction_start,
10374               OpcP, RMopc_Mem(secondary,src1),
10375               Pop_Mem_FPR(dst) );
10376   ins_pipe( fpu_mem_mem_mem );
10377 %}
10378 
10379 // Spill to obtain 24-bit precision
10380 instruct mulFPR24_reg_imm(stackSlotF dst, regFPR src, immFPR con) %{
10381   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10382   match(Set dst (MulF src con));
10383 
10384   format %{ "FLD    $src\n\t"
10385             "FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t"
10386             "FSTP_S $dst"  %}
10387   ins_encode %{
10388     __ fld_s($src$$reg - 1);  // FLD ST(i-1)
10389     __ fmul_s($constantaddress($con));
10390     __ fstp_s(Address(rsp, $dst$$disp));
10391   %}
10392   ins_pipe(fpu_mem_reg_con);
10393 %}
10394 //
10395 // This instruction does not round to 24-bits
10396 instruct mulFPR_reg_imm(regFPR dst, regFPR src, immFPR con) %{
10397   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10398   match(Set dst (MulF src con));
10399 
10400   format %{ "FLD    $src\n\t"
10401             "FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t"
10402             "FSTP   $dst"  %}
10403   ins_encode %{
10404     __ fld_s($src$$reg - 1);  // FLD ST(i-1)
10405     __ fmul_s($constantaddress($con));
10406     __ fstp_d($dst$$reg);
10407   %}
10408   ins_pipe(fpu_reg_reg_con);
10409 %}
10410 
10411 
10412 //
10413 // MACRO1 -- subsume unshared load into mulFPR
10414 // This instruction does not round to 24-bits
10415 instruct mulFPR_reg_load1(regFPR dst, regFPR src, memory mem1 ) %{
10416   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10417   match(Set dst (MulF (LoadF mem1) src));
10418 
10419   format %{ "FLD    $mem1    ===MACRO1===\n\t"
10420             "FMUL   ST,$src\n\t"
10421             "FSTP   $dst" %}
10422   opcode(0xD8, 0x1, 0xD9); /* D8 C8+i or D8 /1 */  /* LoadF D9 /0 */
10423   ins_encode( Opcode(tertiary), RMopc_Mem(0x00,mem1),
10424               OpcReg_FPR(src),
10425               Pop_Reg_FPR(dst) );
10426   ins_pipe( fpu_reg_reg_mem );
10427 %}
10428 //
10429 // MACRO2 -- addFPR a mulFPR which subsumed an unshared load
10430 // This instruction does not round to 24-bits
10431 instruct addFPR_mulFPR_reg_load1(regFPR dst, memory mem1, regFPR src1, regFPR src2) %{
10432   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10433   match(Set dst (AddF (MulF (LoadF mem1) src1) src2));
10434   ins_cost(95);
10435 
10436   format %{ "FLD    $mem1     ===MACRO2===\n\t"
10437             "FMUL   ST,$src1  subsume mulFPR left load\n\t"
10438             "FADD   ST,$src2\n\t"
10439             "FSTP   $dst" %}
10440   opcode(0xD9); /* LoadF D9 /0 */
10441   ins_encode( OpcP, RMopc_Mem(0x00,mem1),
10442               FMul_ST_reg(src1),
10443               FAdd_ST_reg(src2),
10444               Pop_Reg_FPR(dst) );
10445   ins_pipe( fpu_reg_mem_reg_reg );
10446 %}
10447 
10448 // MACRO3 -- addFPR a mulFPR
10449 // This instruction does not round to 24-bits.  It is a '2-address'
10450 // instruction in that the result goes back to src2.  This eliminates
10451 // a move from the macro; possibly the register allocator will have
10452 // to add it back (and maybe not).
10453 instruct addFPR_mulFPR_reg(regFPR src2, regFPR src1, regFPR src0) %{
10454   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10455   match(Set src2 (AddF (MulF src0 src1) src2));
10456 
10457   format %{ "FLD    $src0     ===MACRO3===\n\t"
10458             "FMUL   ST,$src1\n\t"
10459             "FADDP  $src2,ST" %}
10460   opcode(0xD9); /* LoadF D9 /0 */
10461   ins_encode( Push_Reg_FPR(src0),
10462               FMul_ST_reg(src1),
10463               FAddP_reg_ST(src2) );
10464   ins_pipe( fpu_reg_reg_reg );
10465 %}
10466 
10467 // MACRO4 -- divFPR subFPR
10468 // This instruction does not round to 24-bits
10469 instruct subFPR_divFPR_reg(regFPR dst, regFPR src1, regFPR src2, regFPR src3) %{
10470   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10471   match(Set dst (DivF (SubF src2 src1) src3));
10472 
10473   format %{ "FLD    $src2   ===MACRO4===\n\t"
10474             "FSUB   ST,$src1\n\t"
10475             "FDIV   ST,$src3\n\t"
10476             "FSTP  $dst" %}
10477   opcode(0xDE, 0x7); /* DE F8+i or DE /7*/
10478   ins_encode( Push_Reg_FPR(src2),
10479               subFPR_divFPR_encode(src1,src3),
10480               Pop_Reg_FPR(dst) );
10481   ins_pipe( fpu_reg_reg_reg_reg );
10482 %}
10483 
10484 // Spill to obtain 24-bit precision
10485 instruct divFPR24_reg(stackSlotF dst, regFPR src1, regFPR src2) %{
10486   predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
10487   match(Set dst (DivF src1 src2));
10488 
10489   format %{ "FDIV   $dst,$src1,$src2" %}
10490   opcode(0xD8, 0x6); /* D8 F0+i or DE /6*/
10491   ins_encode( Push_Reg_FPR(src1),
10492               OpcReg_FPR(src2),
10493               Pop_Mem_FPR(dst) );
10494   ins_pipe( fpu_mem_reg_reg );
10495 %}
10496 //
10497 // This instruction does not round to 24-bits
10498 instruct divFPR_reg(regFPR dst, regFPR src) %{
10499   predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
10500   match(Set dst (DivF dst src));
10501 
10502   format %{ "FDIV   $dst,$src" %}
10503   opcode(0xDE, 0x7); /* DE F8+i or DE /7*/
10504   ins_encode( Push_Reg_FPR(src),
10505               OpcP, RegOpc(dst) );
10506   ins_pipe( fpu_reg_reg );
10507 %}
10508 
10509 
10510 // Spill to obtain 24-bit precision
10511 instruct modFPR24_reg(stackSlotF dst, regFPR src1, regFPR src2, eAXRegI rax, eFlagsReg cr) %{
10512   predicate( UseSSE==0 && Compile::current()->select_24_bit_instr());
10513   match(Set dst (ModF src1 src2));
10514   effect(KILL rax, KILL cr); // emitModDPR() uses EAX and EFLAGS
10515 
10516   format %{ "FMOD   $dst,$src1,$src2" %}
10517   ins_encode( Push_Reg_Mod_DPR(src1, src2),
10518               emitModDPR(),
10519               Push_Result_Mod_DPR(src2),
10520               Pop_Mem_FPR(dst));
10521   ins_pipe( pipe_slow );
10522 %}
10523 //
10524 // This instruction does not round to 24-bits
10525 instruct modFPR_reg(regFPR dst, regFPR src, eAXRegI rax, eFlagsReg cr) %{
10526   predicate( UseSSE==0 && !Compile::current()->select_24_bit_instr());
10527   match(Set dst (ModF dst src));
10528   effect(KILL rax, KILL cr); // emitModDPR() uses EAX and EFLAGS
10529 
10530   format %{ "FMOD   $dst,$src" %}
10531   ins_encode(Push_Reg_Mod_DPR(dst, src),
10532               emitModDPR(),
10533               Push_Result_Mod_DPR(src),
10534               Pop_Reg_FPR(dst));
10535   ins_pipe( pipe_slow );
10536 %}
10537 
10538 instruct modF_reg(regF dst, regF src0, regF src1, eAXRegI rax, eFlagsReg cr) %{
10539   predicate(UseSSE>=1);
10540   match(Set dst (ModF src0 src1));
10541   effect(KILL rax, KILL cr);
10542   format %{ "SUB    ESP,4\t # FMOD\n"
10543           "\tMOVSS  [ESP+0],$src1\n"
10544           "\tFLD_S  [ESP+0]\n"
10545           "\tMOVSS  [ESP+0],$src0\n"
10546           "\tFLD_S  [ESP+0]\n"
10547      "loop:\tFPREM\n"
10548           "\tFWAIT\n"
10549           "\tFNSTSW AX\n"
10550           "\tSAHF\n"
10551           "\tJP     loop\n"
10552           "\tFSTP_S [ESP+0]\n"
10553           "\tMOVSS  $dst,[ESP+0]\n"
10554           "\tADD    ESP,4\n"
10555           "\tFSTP   ST0\t # Restore FPU Stack"
10556     %}
10557   ins_cost(250);
10558   ins_encode( Push_ModF_encoding(src0, src1), emitModDPR(), Push_ResultF(dst,0x4), PopFPU);
10559   ins_pipe( pipe_slow );
10560 %}
10561 
10562 
10563 //----------Arithmetic Conversion Instructions---------------------------------
10564 // The conversion operations are all alpha-sorted.  Please keep it that way!
10565 
10566 instruct roundFloat_mem_reg(stackSlotF dst, regFPR src) %{
10567   predicate(UseSSE==0);
10568   match(Set dst (RoundFloat src));
10569   ins_cost(125);
10570   format %{ "FST_S  $dst,$src\t# F-round" %}
10571   ins_encode( Pop_Mem_Reg_FPR(dst, src) );
10572   ins_pipe( fpu_mem_reg );
10573 %}
10574 
10575 instruct roundDouble_mem_reg(stackSlotD dst, regDPR src) %{
10576   predicate(UseSSE<=1);
10577   match(Set dst (RoundDouble src));
10578   ins_cost(125);
10579   format %{ "FST_D  $dst,$src\t# D-round" %}
10580   ins_encode( Pop_Mem_Reg_DPR(dst, src) );
10581   ins_pipe( fpu_mem_reg );
10582 %}
10583 
10584 // Force rounding to 24-bit precision and 8-bit exponent
10585 instruct convDPR2FPR_reg(stackSlotF dst, regDPR src) %{
10586   predicate(UseSSE==0);
10587   match(Set dst (ConvD2F src));
10588   format %{ "FST_S  $dst,$src\t# F-round" %}
10589   expand %{
10590     roundFloat_mem_reg(dst,src);
10591   %}
10592 %}
10593 
10594 // Force rounding to 24-bit precision and 8-bit exponent
10595 instruct convDPR2F_reg(regF dst, regDPR src, eFlagsReg cr) %{
10596   predicate(UseSSE==1);
10597   match(Set dst (ConvD2F src));
10598   effect( KILL cr );
10599   format %{ "SUB    ESP,4\n\t"
10600             "FST_S  [ESP],$src\t# F-round\n\t"
10601             "MOVSS  $dst,[ESP]\n\t"
10602             "ADD ESP,4" %}
10603   ins_encode %{
10604     __ subptr(rsp, 4);
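    // If the source is already in ST(0) it can be stored (and thereby rounded
    // to float) directly; any other FPU register is first loaded to the top
    // of the stack and then stored with a pop.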
10605     if ($src$$reg != FPR1L_enc) {
10606       __ fld_s($src$$reg-1);
10607       __ fstp_s(Address(rsp, 0));
10608     } else {
10609       __ fst_s(Address(rsp, 0));
10610     }
10611     __ movflt($dst$$XMMRegister, Address(rsp, 0));
10612     __ addptr(rsp, 4);
10613   %}
10614   ins_pipe( pipe_slow );
10615 %}
10616 
10617 // Force rounding double precision to single precision
10618 instruct convD2F_reg(regF dst, regD src) %{
10619   predicate(UseSSE>=2);
10620   match(Set dst (ConvD2F src));
10621   format %{ "CVTSD2SS $dst,$src\t# F-round" %}
10622   ins_encode %{
10623     __ cvtsd2ss ($dst$$XMMRegister, $src$$XMMRegister);
10624   %}
10625   ins_pipe( pipe_slow );
10626 %}
10627 
10628 instruct convFPR2DPR_reg_reg(regDPR dst, regFPR src) %{
10629   predicate(UseSSE==0);
10630   match(Set dst (ConvF2D src));
10631   format %{ "FST_S  $dst,$src\t# D-round" %}
10632   ins_encode( Pop_Reg_Reg_DPR(dst, src));
10633   ins_pipe( fpu_reg_reg );
10634 %}
10635 
10636 instruct convFPR2D_reg(stackSlotD dst, regFPR src) %{
10637   predicate(UseSSE==1);
10638   match(Set dst (ConvF2D src));
10639   format %{ "FST_D  $dst,$src\t# D-round" %}
10640   expand %{
10641     roundDouble_mem_reg(dst,src);
10642   %}
10643 %}
10644 
10645 instruct convF2DPR_reg(regDPR dst, regF src, eFlagsReg cr) %{
10646   predicate(UseSSE==1);
10647   match(Set dst (ConvF2D src));
10648   effect( KILL cr );
10649   format %{ "SUB    ESP,4\n\t"
10650             "MOVSS  [ESP] $src\n\t"
10651             "FLD_S  [ESP]\n\t"
10652             "ADD    ESP,4\n\t"
10653             "FSTP   $dst\t# D-round" %}
10654   ins_encode %{
10655     __ subptr(rsp, 4);
10656     __ movflt(Address(rsp, 0), $src$$XMMRegister);
10657     __ fld_s(Address(rsp, 0));
10658     __ addptr(rsp, 4);
10659     __ fstp_d($dst$$reg);
10660   %}
10661   ins_pipe( pipe_slow );
10662 %}
10663 
10664 instruct convF2D_reg(regD dst, regF src) %{
10665   predicate(UseSSE>=2);
10666   match(Set dst (ConvF2D src));
10667   format %{ "CVTSS2SD $dst,$src\t# D-round" %}
10668   ins_encode %{
10669     __ cvtss2sd ($dst$$XMMRegister, $src$$XMMRegister);
10670   %}
10671   ins_pipe( pipe_slow );
10672 %}
10673 
10674 // Convert a double to an int.  If the double is a NaN, stuff a zero in instead.
10675 instruct convDPR2I_reg_reg( eAXRegI dst, eDXRegI tmp, regDPR src, eFlagsReg cr ) %{
10676   predicate(UseSSE<=1);
10677   match(Set dst (ConvD2I src));
10678   effect( KILL tmp, KILL cr );
10679   format %{ "FLD    $src\t# Convert double to int \n\t"
10680             "FLDCW  trunc mode\n\t"
10681             "SUB    ESP,4\n\t"
10682             "FISTp  [ESP + #0]\n\t"
10683             "FLDCW  std/24-bit mode\n\t"
10684             "POP    EAX\n\t"
10685             "CMP    EAX,0x80000000\n\t"
10686             "JNE,s  fast\n\t"
10687             "FLD_D  $src\n\t"
10688             "CALL   d2i_wrapper\n"
10689       "fast:" %}
10690   ins_encode( Push_Reg_DPR(src), DPR2I_encoding(src) );
10691   ins_pipe( pipe_slow );
10692 %}
10693 
10694 // Convert a double to an int.  If the double is a NaN, stuff a zero in instead.
10695 instruct convD2I_reg_reg( eAXRegI dst, eDXRegI tmp, regD src, eFlagsReg cr ) %{
10696   predicate(UseSSE>=2);
10697   match(Set dst (ConvD2I src));
10698   effect( KILL tmp, KILL cr );
10699   format %{ "CVTTSD2SI $dst, $src\n\t"
10700             "CMP    $dst,0x80000000\n\t"
10701             "JNE,s  fast\n\t"
10702             "SUB    ESP, 8\n\t"
10703             "MOVSD  [ESP], $src\n\t"
10704             "FLD_D  [ESP]\n\t"
10705             "ADD    ESP, 8\n\t"
10706             "CALL   d2i_wrapper\n"
10707       "fast:" %}
10708   ins_encode %{
10709     Label fast;
10710     __ cvttsd2sil($dst$$Register, $src$$XMMRegister);
10711     __ cmpl($dst$$Register, 0x80000000);
10712     __ jccb(Assembler::notEqual, fast);
10713     __ subptr(rsp, 8);
10714     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
10715     __ fld_d(Address(rsp, 0));
10716     __ addptr(rsp, 8);
10717     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::d2i_wrapper())));
10718     __ bind(fast);
10719   %}
10720   ins_pipe( pipe_slow );
10721 %}
10722 
10723 instruct convDPR2L_reg_reg( eADXRegL dst, regDPR src, eFlagsReg cr ) %{
10724   predicate(UseSSE<=1);
10725   match(Set dst (ConvD2L src));
10726   effect( KILL cr );
10727   format %{ "FLD    $src\t# Convert double to long\n\t"
10728             "FLDCW  trunc mode\n\t"
10729             "SUB    ESP,8\n\t"
10730             "FISTp  [ESP + #0]\n\t"
10731             "FLDCW  std/24-bit mode\n\t"
10732             "POP    EAX\n\t"
10733             "POP    EDX\n\t"
10734             "CMP    EDX,0x80000000\n\t"
10735             "JNE,s  fast\n\t"
10736             "TEST   EAX,EAX\n\t"
10737             "JNE,s  fast\n\t"
10738             "FLD    $src\n\t"
10739             "CALL   d2l_wrapper\n"
10740       "fast:" %}
10741   ins_encode( Push_Reg_DPR(src),  DPR2L_encoding(src) );
10742   ins_pipe( pipe_slow );
10743 %}
10744 
10745 // XMM lacks a float/double->long conversion, so use the old FPU stack.
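// (In 32-bit mode CVTTSS2SI/CVTTSD2SI can only produce a 32-bit integer, so
// the 64-bit result has to come from the x87 FIST path below.)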
10746 instruct convD2L_reg_reg( eADXRegL dst, regD src, eFlagsReg cr ) %{
10747   predicate (UseSSE>=2);
10748   match(Set dst (ConvD2L src));
10749   effect( KILL cr );
10750   format %{ "SUB    ESP,8\t# Convert double to long\n\t"
10751             "MOVSD  [ESP],$src\n\t"
10752             "FLD_D  [ESP]\n\t"
10753             "FLDCW  trunc mode\n\t"
10754             "FISTp  [ESP + #0]\n\t"
10755             "FLDCW  std/24-bit mode\n\t"
10756             "POP    EAX\n\t"
10757             "POP    EDX\n\t"
10758             "CMP    EDX,0x80000000\n\t"
10759             "JNE,s  fast\n\t"
10760             "TEST   EAX,EAX\n\t"
10761             "JNE,s  fast\n\t"
10762             "SUB    ESP,8\n\t"
10763             "MOVSD  [ESP],$src\n\t"
10764             "FLD_D  [ESP]\n\t"
10765             "ADD    ESP,8\n\t"
10766             "CALL   d2l_wrapper\n"
10767       "fast:" %}
10768   ins_encode %{
10769     Label fast;
10770     __ subptr(rsp, 8);
10771     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
10772     __ fld_d(Address(rsp, 0));
10773     __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
10774     __ fistp_d(Address(rsp, 0));
10775     // Restore the rounding mode, mask the exception
10776     if (Compile::current()->in_24_bit_fp_mode()) {
10777       __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
10778     } else {
10779       __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
10780     }
10781     // Load the converted long, adjust CPU stack
10782     __ pop(rax);
10783     __ pop(rdx);
10784     __ cmpl(rdx, 0x80000000);
10785     __ jccb(Assembler::notEqual, fast);
10786     __ testl(rax, rax);
10787     __ jccb(Assembler::notEqual, fast);
10788     __ subptr(rsp, 8);
10789     __ movdbl(Address(rsp, 0), $src$$XMMRegister);
10790     __ fld_d(Address(rsp, 0));
10791     __ addptr(rsp, 8);
10792     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::d2l_wrapper())));
10793     __ bind(fast);
10794   %}
10795   ins_pipe( pipe_slow );
10796 %}
10797 
10798 // Convert a double to an int.  Java semantics require we handle some
10799 // awkward corner cases.  So we set the rounding mode to
10800 // 'zero' (truncate), store the double down as an int, and reset the
10801 // rounding mode to 'nearest'.  The hardware stores a flag value down
10802 // if we would overflow or converted a NaN; we check for this and
10803 // go the slow path if needed.
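// For example, under the Java narrowing rules (int)Double.NaN must be 0 and
// (int)1.0e30 must be Integer.MAX_VALUE, but both the FIST store and
// CVTTSD2SI collapse NaN and out-of-range values to the single "integer
// indefinite" pattern 0x80000000.  That is why the encodings below compare
// the raw result against 0x80000000 and fall back to the d2i_wrapper stub,
// which sorts out which corner case actually occurred.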
10804 instruct convFPR2I_reg_reg(eAXRegI dst, eDXRegI tmp, regFPR src, eFlagsReg cr ) %{
10805   predicate(UseSSE==0);
10806   match(Set dst (ConvF2I src));
10807   effect( KILL tmp, KILL cr );
10808   format %{ "FLD    $src\t# Convert float to int \n\t"
10809             "FLDCW  trunc mode\n\t"
10810             "SUB    ESP,4\n\t"
10811             "FISTp  [ESP + #0]\n\t"
10812             "FLDCW  std/24-bit mode\n\t"
10813             "POP    EAX\n\t"
10814             "CMP    EAX,0x80000000\n\t"
10815             "JNE,s  fast\n\t"
10816             "FLD    $src\n\t"
10817             "CALL   d2i_wrapper\n"
10818       "fast:" %}
10819   // DPR2I_encoding works for FPR2I
10820   ins_encode( Push_Reg_FPR(src), DPR2I_encoding(src) );
10821   ins_pipe( pipe_slow );
10822 %}
10823 
10824 // Convert a float in xmm to an int reg.
10825 instruct convF2I_reg(eAXRegI dst, eDXRegI tmp, regF src, eFlagsReg cr ) %{
10826   predicate(UseSSE>=1);
10827   match(Set dst (ConvF2I src));
10828   effect( KILL tmp, KILL cr );
10829   format %{ "CVTTSS2SI $dst, $src\n\t"
10830             "CMP    $dst,0x80000000\n\t"
10831             "JNE,s  fast\n\t"
10832             "SUB    ESP, 4\n\t"
10833             "MOVSS  [ESP], $src\n\t"
10834             "FLD    [ESP]\n\t"
10835             "ADD    ESP, 4\n\t"
10836             "CALL   d2i_wrapper\n"
10837       "fast:" %}
10838   ins_encode %{
10839     Label fast;
10840     __ cvttss2sil($dst$$Register, $src$$XMMRegister);
10841     __ cmpl($dst$$Register, 0x80000000);
10842     __ jccb(Assembler::notEqual, fast);
10843     __ subptr(rsp, 4);
10844     __ movflt(Address(rsp, 0), $src$$XMMRegister);
10845     __ fld_s(Address(rsp, 0));
10846     __ addptr(rsp, 4);
10847     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::d2i_wrapper())));
10848     __ bind(fast);
10849   %}
10850   ins_pipe( pipe_slow );
10851 %}
10852 
10853 instruct convFPR2L_reg_reg( eADXRegL dst, regFPR src, eFlagsReg cr ) %{
10854   predicate(UseSSE==0);
10855   match(Set dst (ConvF2L src));
10856   effect( KILL cr );
10857   format %{ "FLD    $src\t# Convert float to long\n\t"
10858             "FLDCW  trunc mode\n\t"
10859             "SUB    ESP,8\n\t"
10860             "FISTp  [ESP + #0]\n\t"
10861             "FLDCW  std/24-bit mode\n\t"
10862             "POP    EAX\n\t"
10863             "POP    EDX\n\t"
10864             "CMP    EDX,0x80000000\n\t"
10865             "JNE,s  fast\n\t"
10866             "TEST   EAX,EAX\n\t"
10867             "JNE,s  fast\n\t"
10868             "FLD    $src\n\t"
10869             "CALL   d2l_wrapper\n"
10870       "fast:" %}
10871   // DPR2L_encoding works for FPR2L
10872   ins_encode( Push_Reg_FPR(src), DPR2L_encoding(src) );
10873   ins_pipe( pipe_slow );
10874 %}
10875 
10876 // XMM lacks a float/double->long conversion, so use the old FPU stack.
10877 instruct convF2L_reg_reg( eADXRegL dst, regF src, eFlagsReg cr ) %{
10878   predicate (UseSSE>=1);
10879   match(Set dst (ConvF2L src));
10880   effect( KILL cr );
10881   format %{ "SUB    ESP,8\t# Convert float to long\n\t"
10882             "MOVSS  [ESP],$src\n\t"
10883             "FLD_S  [ESP]\n\t"
10884             "FLDCW  trunc mode\n\t"
10885             "FISTp  [ESP + #0]\n\t"
10886             "FLDCW  std/24-bit mode\n\t"
10887             "POP    EAX\n\t"
10888             "POP    EDX\n\t"
10889             "CMP    EDX,0x80000000\n\t"
10890             "JNE,s  fast\n\t"
10891             "TEST   EAX,EAX\n\t"
10892             "JNE,s  fast\n\t"
10893             "SUB    ESP,4\t# Convert float to long\n\t"
10894             "MOVSS  [ESP],$src\n\t"
10895             "FLD_S  [ESP]\n\t"
10896             "ADD    ESP,4\n\t"
10897             "CALL   d2l_wrapper\n"
10898       "fast:" %}
10899   ins_encode %{
10900     Label fast;
10901     __ subptr(rsp, 8);
10902     __ movflt(Address(rsp, 0), $src$$XMMRegister);
10903     __ fld_s(Address(rsp, 0));
10904     __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
10905     __ fistp_d(Address(rsp, 0));
10906     // Restore the rounding mode, mask the exception
10907     if (Compile::current()->in_24_bit_fp_mode()) {
10908       __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
10909     } else {
10910       __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
10911     }
10912     // Load the converted long, adjust CPU stack
10913     __ pop(rax);
10914     __ pop(rdx);
10915     __ cmpl(rdx, 0x80000000);
10916     __ jccb(Assembler::notEqual, fast);
10917     __ testl(rax, rax);
10918     __ jccb(Assembler::notEqual, fast);
10919     __ subptr(rsp, 4);
10920     __ movflt(Address(rsp, 0), $src$$XMMRegister);
10921     __ fld_s(Address(rsp, 0));
10922     __ addptr(rsp, 4);
10923     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::d2l_wrapper())));
10924     __ bind(fast);
10925   %}
10926   ins_pipe( pipe_slow );
10927 %}
10928 
10929 instruct convI2DPR_reg(regDPR dst, stackSlotI src) %{
10930   predicate( UseSSE<=1 );
10931   match(Set dst (ConvI2D src));
10932   format %{ "FILD   $src\n\t"
10933             "FSTP   $dst" %}
10934   opcode(0xDB, 0x0);  /* DB /0 */
10935   ins_encode(Push_Mem_I(src), Pop_Reg_DPR(dst));
10936   ins_pipe( fpu_reg_mem );
10937 %}
10938 
10939 instruct convI2D_reg(regD dst, rRegI src) %{
10940   predicate( UseSSE>=2 && !UseXmmI2D );
10941   match(Set dst (ConvI2D src));
10942   format %{ "CVTSI2SD $dst,$src" %}
10943   ins_encode %{
10944     __ cvtsi2sdl ($dst$$XMMRegister, $src$$Register);
10945   %}
10946   ins_pipe( pipe_slow );
10947 %}
10948 
10949 instruct convI2D_mem(regD dst, memory mem) %{
10950   predicate( UseSSE>=2 );
10951   match(Set dst (ConvI2D (LoadI mem)));
10952   format %{ "CVTSI2SD $dst,$mem" %}
10953   ins_encode %{
10954     __ cvtsi2sdl ($dst$$XMMRegister, $mem$$Address);
10955   %}
10956   ins_pipe( pipe_slow );
10957 %}
10958 
10959 instruct convXI2D_reg(regD dst, rRegI src)
10960 %{
10961   predicate( UseSSE>=2 && UseXmmI2D );
10962   match(Set dst (ConvI2D src));
10963 
10964   format %{ "MOVD  $dst,$src\n\t"
10965             "CVTDQ2PD $dst,$dst\t# i2d" %}
10966   ins_encode %{
10967     __ movdl($dst$$XMMRegister, $src$$Register);
10968     __ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister);
10969   %}
10970   ins_pipe(pipe_slow); // XXX
10971 %}
10972 
10973 instruct convI2DPR_mem(regDPR dst, memory mem) %{
10974   predicate( UseSSE<=1 && !Compile::current()->select_24_bit_instr());
10975   match(Set dst (ConvI2D (LoadI mem)));
10976   format %{ "FILD   $mem\n\t"
10977             "FSTP   $dst" %}
10978   opcode(0xDB);      /* DB /0 */
10979   ins_encode( OpcP, RMopc_Mem(0x00,mem),
10980               Pop_Reg_DPR(dst));
10981   ins_pipe( fpu_reg_mem );
10982 %}
10983 
10984 // Convert a byte to a float; no rounding step needed.
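// (The AndI-with-255 pattern matched below guarantees a value in [0, 255],
// which fits easily in a 24-bit significand, so the result is already exact
// in single precision.)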
10985 instruct conv24I2FPR_reg(regFPR dst, stackSlotI src) %{
10986   predicate( UseSSE==0 && n->in(1)->Opcode() == Op_AndI && n->in(1)->in(2)->is_Con() && n->in(1)->in(2)->get_int() == 255 );
10987   match(Set dst (ConvI2F src));
10988   format %{ "FILD   $src\n\t"
10989             "FSTP   $dst" %}
10990 
10991   opcode(0xDB, 0x0);  /* DB /0 */
10992   ins_encode(Push_Mem_I(src), Pop_Reg_FPR(dst));
10993   ins_pipe( fpu_reg_mem );
10994 %}
10995 
10996 // In 24-bit mode, force exponent rounding by storing back out
10997 instruct convI2FPR_SSF(stackSlotF dst, stackSlotI src) %{
10998   predicate( UseSSE==0 && Compile::current()->select_24_bit_instr());
10999   match(Set dst (ConvI2F src));
11000   ins_cost(200);
11001   format %{ "FILD   $src\n\t"
11002             "FSTP_S $dst" %}
11003   opcode(0xDB, 0x0);  /* DB /0 */
11004   ins_encode( Push_Mem_I(src),
11005               Pop_Mem_FPR(dst));
11006   ins_pipe( fpu_mem_mem );
11007 %}
11008 
11009 // In 24-bit mode, force exponent rounding by storing back out
11010 instruct convI2FPR_SSF_mem(stackSlotF dst, memory mem) %{
11011   predicate( UseSSE==0 && Compile::current()->select_24_bit_instr());
11012   match(Set dst (ConvI2F (LoadI mem)));
11013   ins_cost(200);
11014   format %{ "FILD   $mem\n\t"
11015             "FSTP_S $dst" %}
11016   opcode(0xDB);  /* DB /0 */
11017   ins_encode( OpcP, RMopc_Mem(0x00,mem),
11018               Pop_Mem_FPR(dst));
11019   ins_pipe( fpu_mem_mem );
11020 %}
11021 
11022 // This instruction does not round to 24-bits
11023 instruct convI2FPR_reg(regFPR dst, stackSlotI src) %{
11024   predicate( UseSSE==0 && !Compile::current()->select_24_bit_instr());
11025   match(Set dst (ConvI2F src));
11026   format %{ "FILD   $src\n\t"
11027             "FSTP   $dst" %}
11028   opcode(0xDB, 0x0);  /* DB /0 */
11029   ins_encode( Push_Mem_I(src),
11030               Pop_Reg_FPR(dst));
11031   ins_pipe( fpu_reg_mem );
11032 %}
11033 
11034 // This instruction does not round to 24-bits
11035 instruct convI2FPR_mem(regFPR dst, memory mem) %{
11036   predicate( UseSSE==0 && !Compile::current()->select_24_bit_instr());
11037   match(Set dst (ConvI2F (LoadI mem)));
11038   format %{ "FILD   $mem\n\t"
11039             "FSTP   $dst" %}
11040   opcode(0xDB);      /* DB /0 */
11041   ins_encode( OpcP, RMopc_Mem(0x00,mem),
11042               Pop_Reg_FPR(dst));
11043   ins_pipe( fpu_reg_mem );
11044 %}
11045 
11046 // Convert an int to a float in xmm; no rounding step needed.
11047 instruct convI2F_reg(regF dst, rRegI src) %{
11048   predicate( UseSSE==1 || (UseSSE>=2 && !UseXmmI2F) );
11049   match(Set dst (ConvI2F src));
11050   format %{ "CVTSI2SS $dst, $src" %}
11051   ins_encode %{
11052     __ cvtsi2ssl ($dst$$XMMRegister, $src$$Register);
11053   %}
11054   ins_pipe( pipe_slow );
11055 %}
11056 
11057 instruct convXI2F_reg(regF dst, rRegI src)
11058 %{
11059   predicate( UseSSE>=2 && UseXmmI2F );
11060   match(Set dst (ConvI2F src));
11061 
11062   format %{ "MOVD  $dst,$src\n\t"
11063             "CVTDQ2PS $dst,$dst\t# i2f" %}
11064   ins_encode %{
11065     __ movdl($dst$$XMMRegister, $src$$Register);
11066     __ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister);
11067   %}
11068   ins_pipe(pipe_slow); // XXX
11069 %}
11070 
11071 instruct convI2L_reg( eRegL dst, rRegI src, eFlagsReg cr) %{
11072   match(Set dst (ConvI2L src));
11073   effect(KILL cr);
11074   ins_cost(375);
11075   format %{ "MOV    $dst.lo,$src\n\t"
11076             "MOV    $dst.hi,$src\n\t"
11077             "SAR    $dst.hi,31" %}
11078   ins_encode(convert_int_long(dst,src));
11079   ins_pipe( ialu_reg_reg_long );
11080 %}
11081 
11082 // Zero-extend convert int to long
11083 instruct convI2L_reg_zex(eRegL dst, rRegI src, immL_32bits mask, eFlagsReg flags ) %{
11084   match(Set dst (AndL (ConvI2L src) mask) );
11085   effect( KILL flags );
11086   ins_cost(250);
11087   format %{ "MOV    $dst.lo,$src\n\t"
11088             "XOR    $dst.hi,$dst.hi" %}
11089   opcode(0x33); // XOR
11090   ins_encode(enc_Copy(dst,src), OpcP, RegReg_Hi2(dst,dst) );
11091   ins_pipe( ialu_reg_reg_long );
11092 %}
11093 
11094 // Zero-extend long
11095 instruct zerox_long(eRegL dst, eRegL src, immL_32bits mask, eFlagsReg flags ) %{
11096   match(Set dst (AndL src mask) );
11097   effect( KILL flags );
11098   ins_cost(250);
11099   format %{ "MOV    $dst.lo,$src.lo\n\t"
11100             "XOR    $dst.hi,$dst.hi\n\t" %}
11101   opcode(0x33); // XOR
11102   ins_encode(enc_Copy(dst,src), OpcP, RegReg_Hi2(dst,dst) );
11103   ins_pipe( ialu_reg_reg_long );
11104 %}
11105 
11106 instruct convL2DPR_reg( stackSlotD dst, eRegL src, eFlagsReg cr) %{
11107   predicate (UseSSE<=1);
11108   match(Set dst (ConvL2D src));
11109   effect( KILL cr );
11110   format %{ "PUSH   $src.hi\t# Convert long to double\n\t"
11111             "PUSH   $src.lo\n\t"
11112             "FILD   ST,[ESP + #0]\n\t"
11113             "ADD    ESP,8\n\t"
11114             "FSTP_D $dst\t# D-round" %}
11115   opcode(0xDF, 0x5);  /* DF /5 */
11116   ins_encode(convert_long_double(src), Pop_Mem_DPR(dst));
11117   ins_pipe( pipe_slow );
11118 %}
11119 
11120 instruct convL2D_reg( regD dst, eRegL src, eFlagsReg cr) %{
11121   predicate (UseSSE>=2);
11122   match(Set dst (ConvL2D src));
11123   effect( KILL cr );
11124   format %{ "PUSH   $src.hi\t# Convert long to double\n\t"
11125             "PUSH   $src.lo\n\t"
11126             "FILD_D [ESP]\n\t"
11127             "FSTP_D [ESP]\n\t"
11128             "MOVSD  $dst,[ESP]\n\t"
11129             "ADD    ESP,8" %}
11130   opcode(0xDF, 0x5);  /* DF /5 */
11131   ins_encode(convert_long_double2(src), Push_ResultD(dst));
11132   ins_pipe( pipe_slow );
11133 %}
11134 
11135 instruct convL2F_reg( regF dst, eRegL src, eFlagsReg cr) %{
11136   predicate (UseSSE>=1);
11137   match(Set dst (ConvL2F src));
11138   effect( KILL cr );
11139   format %{ "PUSH   $src.hi\t# Convert long to single float\n\t"
11140             "PUSH   $src.lo\n\t"
11141             "FILD_D [ESP]\n\t"
11142             "FSTP_S [ESP]\n\t"
11143             "MOVSS  $dst,[ESP]\n\t"
11144             "ADD    ESP,8" %}
11145   opcode(0xDF, 0x5);  /* DF /5 */
11146   ins_encode(convert_long_double2(src), Push_ResultF(dst,0x8));
11147   ins_pipe( pipe_slow );
11148 %}
11149 
instruct convL2FPR_reg( stackSlotF dst, eRegL src, eFlagsReg cr) %{
  predicate( UseSSE==0 );
  match(Set dst (ConvL2F src));
11152   effect( KILL cr );
11153   format %{ "PUSH   $src.hi\t# Convert long to single float\n\t"
11154             "PUSH   $src.lo\n\t"
11155             "FILD   ST,[ESP + #0]\n\t"
11156             "ADD    ESP,8\n\t"
11157             "FSTP_S $dst\t# F-round" %}
11158   opcode(0xDF, 0x5);  /* DF /5 */
11159   ins_encode(convert_long_double(src), Pop_Mem_FPR(dst));
11160   ins_pipe( pipe_slow );
11161 %}
11162 
11163 instruct convL2I_reg( rRegI dst, eRegL src ) %{
11164   match(Set dst (ConvL2I src));
11165   effect( DEF dst, USE src );
11166   format %{ "MOV    $dst,$src.lo" %}
11167   ins_encode(enc_CopyL_Lo(dst,src));
11168   ins_pipe( ialu_reg_reg );
11169 %}
11170 
11171 
11172 instruct MoveF2I_stack_reg(rRegI dst, stackSlotF src) %{
11173   match(Set dst (MoveF2I src));
11174   effect( DEF dst, USE src );
11175   ins_cost(100);
11176   format %{ "MOV    $dst,$src\t# MoveF2I_stack_reg" %}
11177   ins_encode %{
11178     __ movl($dst$$Register, Address(rsp, $src$$disp));
11179   %}
11180   ins_pipe( ialu_reg_mem );
11181 %}
11182 
11183 instruct MoveFPR2I_reg_stack(stackSlotI dst, regFPR src) %{
11184   predicate(UseSSE==0);
11185   match(Set dst (MoveF2I src));
11186   effect( DEF dst, USE src );
11187 
11188   ins_cost(125);
11189   format %{ "FST_S  $dst,$src\t# MoveF2I_reg_stack" %}
11190   ins_encode( Pop_Mem_Reg_FPR(dst, src) );
11191   ins_pipe( fpu_mem_reg );
11192 %}
11193 
11194 instruct MoveF2I_reg_stack_sse(stackSlotI dst, regF src) %{
11195   predicate(UseSSE>=1);
11196   match(Set dst (MoveF2I src));
11197   effect( DEF dst, USE src );
11198 
11199   ins_cost(95);
11200   format %{ "MOVSS  $dst,$src\t# MoveF2I_reg_stack_sse" %}
11201   ins_encode %{
11202     __ movflt(Address(rsp, $dst$$disp), $src$$XMMRegister);
11203   %}
11204   ins_pipe( pipe_slow );
11205 %}
11206 
11207 instruct MoveF2I_reg_reg_sse(rRegI dst, regF src) %{
11208   predicate(UseSSE>=2);
11209   match(Set dst (MoveF2I src));
11210   effect( DEF dst, USE src );
11211   ins_cost(85);
11212   format %{ "MOVD   $dst,$src\t# MoveF2I_reg_reg_sse" %}
11213   ins_encode %{
11214     __ movdl($dst$$Register, $src$$XMMRegister);
11215   %}
11216   ins_pipe( pipe_slow );
11217 %}
11218 
11219 instruct MoveI2F_reg_stack(stackSlotF dst, rRegI src) %{
11220   match(Set dst (MoveI2F src));
11221   effect( DEF dst, USE src );
11222 
11223   ins_cost(100);
11224   format %{ "MOV    $dst,$src\t# MoveI2F_reg_stack" %}
11225   ins_encode %{
11226     __ movl(Address(rsp, $dst$$disp), $src$$Register);
11227   %}
11228   ins_pipe( ialu_mem_reg );
11229 %}
11230 
11231 
11232 instruct MoveI2FPR_stack_reg(regFPR dst, stackSlotI src) %{
11233   predicate(UseSSE==0);
11234   match(Set dst (MoveI2F src));
11235   effect(DEF dst, USE src);
11236 
11237   ins_cost(125);
11238   format %{ "FLD_S  $src\n\t"
11239             "FSTP   $dst\t# MoveI2F_stack_reg" %}
11240   opcode(0xD9);               /* D9 /0, FLD m32real */
11241   ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
11242               Pop_Reg_FPR(dst) );
11243   ins_pipe( fpu_reg_mem );
11244 %}
11245 
11246 instruct MoveI2F_stack_reg_sse(regF dst, stackSlotI src) %{
11247   predicate(UseSSE>=1);
11248   match(Set dst (MoveI2F src));
11249   effect( DEF dst, USE src );
11250 
11251   ins_cost(95);
11252   format %{ "MOVSS  $dst,$src\t# MoveI2F_stack_reg_sse" %}
11253   ins_encode %{
11254     __ movflt($dst$$XMMRegister, Address(rsp, $src$$disp));
11255   %}
11256   ins_pipe( pipe_slow );
11257 %}
11258 
11259 instruct MoveI2F_reg_reg_sse(regF dst, rRegI src) %{
11260   predicate(UseSSE>=2);
11261   match(Set dst (MoveI2F src));
11262   effect( DEF dst, USE src );
11263 
11264   ins_cost(85);
11265   format %{ "MOVD   $dst,$src\t# MoveI2F_reg_reg_sse" %}
11266   ins_encode %{
11267     __ movdl($dst$$XMMRegister, $src$$Register);
11268   %}
11269   ins_pipe( pipe_slow );
11270 %}
11271 
11272 instruct MoveD2L_stack_reg(eRegL dst, stackSlotD src) %{
11273   match(Set dst (MoveD2L src));
11274   effect(DEF dst, USE src);
11275 
11276   ins_cost(250);
11277   format %{ "MOV    $dst.lo,$src\n\t"
11278             "MOV    $dst.hi,$src+4\t# MoveD2L_stack_reg" %}
11279   opcode(0x8B, 0x8B);
11280   ins_encode( OpcP, RegMem(dst,src), OpcS, RegMem_Hi(dst,src));
11281   ins_pipe( ialu_mem_long_reg );
11282 %}
11283 
11284 instruct MoveDPR2L_reg_stack(stackSlotL dst, regDPR src) %{
11285   predicate(UseSSE<=1);
11286   match(Set dst (MoveD2L src));
11287   effect(DEF dst, USE src);
11288 
11289   ins_cost(125);
11290   format %{ "FST_D  $dst,$src\t# MoveD2L_reg_stack" %}
11291   ins_encode( Pop_Mem_Reg_DPR(dst, src) );
11292   ins_pipe( fpu_mem_reg );
11293 %}
11294 
11295 instruct MoveD2L_reg_stack_sse(stackSlotL dst, regD src) %{
11296   predicate(UseSSE>=2);
11297   match(Set dst (MoveD2L src));
11298   effect(DEF dst, USE src);
11299   ins_cost(95);
11300   format %{ "MOVSD  $dst,$src\t# MoveD2L_reg_stack_sse" %}
11301   ins_encode %{
11302     __ movdbl(Address(rsp, $dst$$disp), $src$$XMMRegister);
11303   %}
11304   ins_pipe( pipe_slow );
11305 %}
11306 
11307 instruct MoveD2L_reg_reg_sse(eRegL dst, regD src, regD tmp) %{
11308   predicate(UseSSE>=2);
11309   match(Set dst (MoveD2L src));
11310   effect(DEF dst, USE src, TEMP tmp);
11311   ins_cost(85);
11312   format %{ "MOVD   $dst.lo,$src\n\t"
11313             "PSHUFLW $tmp,$src,0x4E\n\t"
11314             "MOVD   $dst.hi,$tmp\t# MoveD2L_reg_reg_sse" %}
11315   ins_encode %{
11316     __ movdl($dst$$Register, $src$$XMMRegister);
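    // PSHUFLW with imm8 0x4E (= 01 00 11 10b) swaps the two 32-bit halves of
    // the low quadword, so the low dword of $tmp becomes the high dword of $src.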
11317     __ pshuflw($tmp$$XMMRegister, $src$$XMMRegister, 0x4e);
11318     __ movdl(HIGH_FROM_LOW($dst$$Register), $tmp$$XMMRegister);
11319   %}
11320   ins_pipe( pipe_slow );
11321 %}
11322 
11323 instruct MoveL2D_reg_stack(stackSlotD dst, eRegL src) %{
11324   match(Set dst (MoveL2D src));
11325   effect(DEF dst, USE src);
11326 
11327   ins_cost(200);
11328   format %{ "MOV    $dst,$src.lo\n\t"
11329             "MOV    $dst+4,$src.hi\t# MoveL2D_reg_stack" %}
11330   opcode(0x89, 0x89);
11331   ins_encode( OpcP, RegMem( src, dst ), OpcS, RegMem_Hi( src, dst ) );
11332   ins_pipe( ialu_mem_long_reg );
11333 %}
11334 
11335 
11336 instruct MoveL2DPR_stack_reg(regDPR dst, stackSlotL src) %{
11337   predicate(UseSSE<=1);
11338   match(Set dst (MoveL2D src));
11339   effect(DEF dst, USE src);
11340   ins_cost(125);
11341 
11342   format %{ "FLD_D  $src\n\t"
11343             "FSTP   $dst\t# MoveL2D_stack_reg" %}
11344   opcode(0xDD);               /* DD /0, FLD m64real */
11345   ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
11346               Pop_Reg_DPR(dst) );
11347   ins_pipe( fpu_reg_mem );
11348 %}
11349 
11350 
11351 instruct MoveL2D_stack_reg_sse(regD dst, stackSlotL src) %{
11352   predicate(UseSSE>=2 && UseXmmLoadAndClearUpper);
11353   match(Set dst (MoveL2D src));
11354   effect(DEF dst, USE src);
11355 
11356   ins_cost(95);
11357   format %{ "MOVSD  $dst,$src\t# MoveL2D_stack_reg_sse" %}
11358   ins_encode %{
11359     __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp));
11360   %}
11361   ins_pipe( pipe_slow );
11362 %}
11363 
11364 instruct MoveL2D_stack_reg_sse_partial(regD dst, stackSlotL src) %{
11365   predicate(UseSSE>=2 && !UseXmmLoadAndClearUpper);
11366   match(Set dst (MoveL2D src));
11367   effect(DEF dst, USE src);
11368 
11369   ins_cost(95);
11370   format %{ "MOVLPD $dst,$src\t# MoveL2D_stack_reg_sse" %}
11371   ins_encode %{
11372     __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp));
11373   %}
11374   ins_pipe( pipe_slow );
11375 %}
11376 
11377 instruct MoveL2D_reg_reg_sse(regD dst, eRegL src, regD tmp) %{
11378   predicate(UseSSE>=2);
11379   match(Set dst (MoveL2D src));
11380   effect(TEMP dst, USE src, TEMP tmp);
11381   ins_cost(85);
11382   format %{ "MOVD   $dst,$src.lo\n\t"
11383             "MOVD   $tmp,$src.hi\n\t"
11384             "PUNPCKLDQ $dst,$tmp\t# MoveL2D_reg_reg_sse" %}
11385   ins_encode %{
11386     __ movdl($dst$$XMMRegister, $src$$Register);
11387     __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
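    // PUNPCKLDQ interleaves the low dwords of $dst and $tmp:
    // $dst's low quadword becomes ($src.hi << 32) | $src.lo.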
11388     __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
11389   %}
11390   ins_pipe( pipe_slow );
11391 %}
11392 
11393 
11394 // =======================================================================
11395 // fast clearing of an array
11396 instruct rep_stos(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
11397   predicate(!UseFastStosb);
11398   match(Set dummy (ClearArray cnt base));
11399   effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
11400   format %{ "XOR    EAX,EAX\t# ClearArray:\n\t"
11401             "SHL    ECX,1\t# Convert doublewords to words\n\t"
11402             "REP STOS\t# store EAX into [EDI++] while ECX--" %}
11403   ins_encode %{ 
11404     __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register);
11405   %}
11406   ins_pipe( pipe_slow );
11407 %}
11408 
11409 instruct rep_fast_stosb(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
11410   predicate(UseFastStosb);
11411   match(Set dummy (ClearArray cnt base));
11412   effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
11413   format %{ "XOR    EAX,EAX\t# ClearArray:\n\t"
11414             "SHL    ECX,3\t# Convert doublewords to bytes\n\t"
11415             "REP STOSB\t# store EAX into [EDI++] while ECX--" %}
11416   ins_encode %{ 
11417     __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register);
11418   %}
11419   ins_pipe( pipe_slow );
11420 %}
11421 
11422 instruct string_compare(eDIRegP str1, eCXRegI cnt1, eSIRegP str2, eDXRegI cnt2,
11423                         eAXRegI result, regD tmp1, eFlagsReg cr) %{
11424   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
11425   effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
11426 
11427   format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp1" %}
11428   ins_encode %{
11429     __ string_compare($str1$$Register, $str2$$Register,
11430                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
11431                       $tmp1$$XMMRegister);
11432   %}
11433   ins_pipe( pipe_slow );
11434 %}
11435 
11436 // fast string equals
11437 instruct string_equals(eDIRegP str1, eSIRegP str2, eCXRegI cnt, eAXRegI result,
11438                        regD tmp1, regD tmp2, eBXRegI tmp3, eFlagsReg cr) %{
11439   match(Set result (StrEquals (Binary str1 str2) cnt));
11440   effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp3, KILL cr);
11441 
11442   format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp1, $tmp2, $tmp3" %}
11443   ins_encode %{
11444     __ char_arrays_equals(false, $str1$$Register, $str2$$Register,
11445                           $cnt$$Register, $result$$Register, $tmp3$$Register,
11446                           $tmp1$$XMMRegister, $tmp2$$XMMRegister);
11447   %}
11448   ins_pipe( pipe_slow );
11449 %}
11450 
11451 // fast search of substring with known size.
11452 instruct string_indexof_con(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_cnt2,
11453                             eBXRegI result, regD vec, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
11454   predicate(UseSSE42Intrinsics);
11455   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
11456   effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
11457 
11458   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result   // KILL $vec, $cnt1, $cnt2, $tmp" %}
11459   ins_encode %{
11460     int icnt2 = (int)$int_cnt2$$constant;
11461     if (icnt2 >= 8) {
11462       // IndexOf for constant substrings with size >= 8 elements
      // which don't need to be loaded through the stack.
11464       __ string_indexofC8($str1$$Register, $str2$$Register,
11465                           $cnt1$$Register, $cnt2$$Register,
11466                           icnt2, $result$$Register,
11467                           $vec$$XMMRegister, $tmp$$Register);
11468     } else {
      // Small strings are loaded through the stack if they cross a page boundary.
11470       __ string_indexof($str1$$Register, $str2$$Register,
11471                         $cnt1$$Register, $cnt2$$Register,
11472                         icnt2, $result$$Register,
11473                         $vec$$XMMRegister, $tmp$$Register);
11474     }
11475   %}
11476   ins_pipe( pipe_slow );
11477 %}
11478 
11479 instruct string_indexof(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, eAXRegI cnt2,
11480                         eBXRegI result, regD vec, eCXRegI tmp, eFlagsReg cr) %{
11481   predicate(UseSSE42Intrinsics);
11482   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
11483   effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
11484 
11485   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result   // KILL all" %}
11486   ins_encode %{
11487     __ string_indexof($str1$$Register, $str2$$Register,
11488                       $cnt1$$Register, $cnt2$$Register,
11489                       (-1), $result$$Register,
11490                       $vec$$XMMRegister, $tmp$$Register);
11491   %}
11492   ins_pipe( pipe_slow );
11493 %}
11494 
11495 // fast array equals
11496 instruct array_equals(eDIRegP ary1, eSIRegP ary2, eAXRegI result,
11497                       regD tmp1, regD tmp2, eCXRegI tmp3, eBXRegI tmp4, eFlagsReg cr)
11498 %{
11499   match(Set result (AryEq ary1 ary2));
11500   effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr);
11501   //ins_cost(300);
11502 
11503   format %{ "Array Equals $ary1,$ary2 -> $result   // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
11504   ins_encode %{
11505     __ char_arrays_equals(true, $ary1$$Register, $ary2$$Register,
11506                           $tmp3$$Register, $result$$Register, $tmp4$$Register,
11507                           $tmp1$$XMMRegister, $tmp2$$XMMRegister);
11508   %}
11509   ins_pipe( pipe_slow );
11510 %}
11511 
11512 // encode char[] to byte[] in ISO_8859_1
11513 instruct encode_iso_array(eSIRegP src, eDIRegP dst, eDXRegI len,
11514                           regD tmp1, regD tmp2, regD tmp3, regD tmp4,
11515                           eCXRegI tmp5, eAXRegI result, eFlagsReg cr) %{
11516   match(Set result (EncodeISOArray src (Binary dst len)));
11517   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr);
11518 
11519   format %{ "Encode array $src,$dst,$len -> $result    // KILL ECX, EDX, $tmp1, $tmp2, $tmp3, $tmp4, ESI, EDI " %}
11520   ins_encode %{
11521     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
11522                         $tmp1$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister,
11523                         $tmp4$$XMMRegister, $tmp5$$Register, $result$$Register);
11524   %}
11525   ins_pipe( pipe_slow );
11526 %}
11527 
11528 
11529 //----------Control Flow Instructions------------------------------------------
11530 // Signed compare Instructions
11531 instruct compI_eReg(eFlagsReg cr, rRegI op1, rRegI op2) %{
11532   match(Set cr (CmpI op1 op2));
11533   effect( DEF cr, USE op1, USE op2 );
11534   format %{ "CMP    $op1,$op2" %}
11535   opcode(0x3B);  /* Opcode 3B /r */
11536   ins_encode( OpcP, RegReg( op1, op2) );
11537   ins_pipe( ialu_cr_reg_reg );
11538 %}
11539 
11540 instruct compI_eReg_imm(eFlagsReg cr, rRegI op1, immI op2) %{
11541   match(Set cr (CmpI op1 op2));
11542   effect( DEF cr, USE op1 );
11543   format %{ "CMP    $op1,$op2" %}
11544   opcode(0x81,0x07);  /* Opcode 81 /7 */
11545   // ins_encode( RegImm( op1, op2) );  /* Was CmpImm */
11546   ins_encode( OpcSErm( op1, op2 ), Con8or32( op2 ) );
11547   ins_pipe( ialu_cr_reg_imm );
11548 %}
11549 
11550 // Cisc-spilled version of cmpI_eReg
11551 instruct compI_eReg_mem(eFlagsReg cr, rRegI op1, memory op2) %{
11552   match(Set cr (CmpI op1 (LoadI op2)));
11553 
11554   format %{ "CMP    $op1,$op2" %}
11555   ins_cost(500);
11556   opcode(0x3B);  /* Opcode 3B /r */
11557   ins_encode( OpcP, RegMem( op1, op2) );
11558   ins_pipe( ialu_cr_reg_mem );
11559 %}
11560 
11561 instruct testI_reg( eFlagsReg cr, rRegI src, immI0 zero ) %{
11562   match(Set cr (CmpI src zero));
11563   effect( DEF cr, USE src );
11564 
11565   format %{ "TEST   $src,$src" %}
11566   opcode(0x85);
11567   ins_encode( OpcP, RegReg( src, src ) );
11568   ins_pipe( ialu_cr_reg_imm );
11569 %}
11570 
11571 instruct testI_reg_imm( eFlagsReg cr, rRegI src, immI con, immI0 zero ) %{
11572   match(Set cr (CmpI (AndI src con) zero));
11573 
11574   format %{ "TEST   $src,$con" %}
11575   opcode(0xF7,0x00);
11576   ins_encode( OpcP, RegOpc(src), Con32(con) );
11577   ins_pipe( ialu_cr_reg_imm );
11578 %}
11579 
11580 instruct testI_reg_mem( eFlagsReg cr, rRegI src, memory mem, immI0 zero ) %{
11581   match(Set cr (CmpI (AndI src mem) zero));
11582 
11583   format %{ "TEST   $src,$mem" %}
11584   opcode(0x85);
11585   ins_encode( OpcP, RegMem( src, mem ) );
11586   ins_pipe( ialu_cr_reg_mem );
11587 %}
11588 
11589 // Unsigned compare Instructions; really, same as signed except they
11590 // produce an eFlagsRegU instead of eFlagsReg.
11591 instruct compU_eReg(eFlagsRegU cr, rRegI op1, rRegI op2) %{
11592   match(Set cr (CmpU op1 op2));
11593 
11594   format %{ "CMPu   $op1,$op2" %}
11595   opcode(0x3B);  /* Opcode 3B /r */
11596   ins_encode( OpcP, RegReg( op1, op2) );
11597   ins_pipe( ialu_cr_reg_reg );
11598 %}
11599 
11600 instruct compU_eReg_imm(eFlagsRegU cr, rRegI op1, immI op2) %{
11601   match(Set cr (CmpU op1 op2));
11602 
11603   format %{ "CMPu   $op1,$op2" %}
11604   opcode(0x81,0x07);  /* Opcode 81 /7 */
11605   ins_encode( OpcSErm( op1, op2 ), Con8or32( op2 ) );
11606   ins_pipe( ialu_cr_reg_imm );
11607 %}
11608 
// Cisc-spilled version of cmpU_eReg
11610 instruct compU_eReg_mem(eFlagsRegU cr, rRegI op1, memory op2) %{
11611   match(Set cr (CmpU op1 (LoadI op2)));
11612 
11613   format %{ "CMPu   $op1,$op2" %}
11614   ins_cost(500);
11615   opcode(0x3B);  /* Opcode 3B /r */
11616   ins_encode( OpcP, RegMem( op1, op2) );
11617   ins_pipe( ialu_cr_reg_mem );
11618 %}
11619 
11620 // // Cisc-spilled version of cmpU_eReg
11621 //instruct compU_mem_eReg(eFlagsRegU cr, memory op1, rRegI op2) %{
11622 //  match(Set cr (CmpU (LoadI op1) op2));
11623 //
11624 //  format %{ "CMPu   $op1,$op2" %}
11625 //  ins_cost(500);
11626 //  opcode(0x39);  /* Opcode 39 /r */
11627 //  ins_encode( OpcP, RegMem( op1, op2) );
11628 //%}
11629 
11630 instruct testU_reg( eFlagsRegU cr, rRegI src, immI0 zero ) %{
11631   match(Set cr (CmpU src zero));
11632 
11633   format %{ "TESTu  $src,$src" %}
11634   opcode(0x85);
11635   ins_encode( OpcP, RegReg( src, src ) );
11636   ins_pipe( ialu_cr_reg_imm );
11637 %}
11638 
11639 // Unsigned pointer compare Instructions
11640 instruct compP_eReg(eFlagsRegU cr, eRegP op1, eRegP op2) %{
11641   match(Set cr (CmpP op1 op2));
11642 
11643   format %{ "CMPu   $op1,$op2" %}
11644   opcode(0x3B);  /* Opcode 3B /r */
11645   ins_encode( OpcP, RegReg( op1, op2) );
11646   ins_pipe( ialu_cr_reg_reg );
11647 %}
11648 
11649 instruct compP_eReg_imm(eFlagsRegU cr, eRegP op1, immP op2) %{
11650   match(Set cr (CmpP op1 op2));
11651 
11652   format %{ "CMPu   $op1,$op2" %}
11653   opcode(0x81,0x07);  /* Opcode 81 /7 */
11654   ins_encode( OpcSErm( op1, op2 ), Con8or32( op2 ) );
11655   ins_pipe( ialu_cr_reg_imm );
11656 %}
11657 
// Cisc-spilled version of cmpP_eReg
11659 instruct compP_eReg_mem(eFlagsRegU cr, eRegP op1, memory op2) %{
11660   match(Set cr (CmpP op1 (LoadP op2)));
11661 
11662   format %{ "CMPu   $op1,$op2" %}
11663   ins_cost(500);
11664   opcode(0x3B);  /* Opcode 3B /r */
11665   ins_encode( OpcP, RegMem( op1, op2) );
11666   ins_pipe( ialu_cr_reg_mem );
11667 %}
11668 
11669 // // Cisc-spilled version of cmpP_eReg
11670 //instruct compP_mem_eReg(eFlagsRegU cr, memory op1, eRegP op2) %{
11671 //  match(Set cr (CmpP (LoadP op1) op2));
11672 //
11673 //  format %{ "CMPu   $op1,$op2" %}
11674 //  ins_cost(500);
11675 //  opcode(0x39);  /* Opcode 39 /r */
11676 //  ins_encode( OpcP, RegMem( op1, op2) );
11677 //%}
11678 
11679 // Compare raw pointer (used in out-of-heap check).
11680 // Only works because non-oop pointers must be raw pointers
11681 // and raw pointers have no anti-dependencies.
11682 instruct compP_mem_eReg( eFlagsRegU cr, eRegP op1, memory op2 ) %{
11683   predicate( n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none );
11684   match(Set cr (CmpP op1 (LoadP op2)));
11685 
11686   format %{ "CMPu   $op1,$op2" %}
11687   opcode(0x3B);  /* Opcode 3B /r */
11688   ins_encode( OpcP, RegMem( op1, op2) );
11689   ins_pipe( ialu_cr_reg_mem );
11690 %}
11691 
11692 //
11693 // This will generate a signed flags result. This should be ok
11694 // since any compare to a zero should be eq/neq.
11695 instruct testP_reg( eFlagsReg cr, eRegP src, immP0 zero ) %{
11696   match(Set cr (CmpP src zero));
11697 
11698   format %{ "TEST   $src,$src" %}
11699   opcode(0x85);
11700   ins_encode( OpcP, RegReg( src, src ) );
11701   ins_pipe( ialu_cr_reg_imm );
11702 %}
11703 
11704 // Cisc-spilled version of testP_reg
11705 // This will generate a signed flags result. This should be ok
11706 // since any compare to a zero should be eq/neq.
11707 instruct testP_Reg_mem( eFlagsReg cr, memory op, immI0 zero ) %{
11708   match(Set cr (CmpP (LoadP op) zero));
11709 
11710   format %{ "TEST   $op,0xFFFFFFFF" %}
11711   ins_cost(500);
11712   opcode(0xF7);               /* Opcode F7 /0 */
11713   ins_encode( OpcP, RMopc_Mem(0x00,op), Con_d32(0xFFFFFFFF) );
11714   ins_pipe( ialu_cr_reg_imm );
11715 %}
11716 
11717 // Yanked all unsigned pointer compare operations.
11718 // Pointer compares are done with CmpP which is already unsigned.
11719 
11720 //----------Max and Min--------------------------------------------------------
11721 // Min Instructions
11722 ////
11723 //   *** Min and Max using the conditional move are slower than the
11724 //   *** branch version on a Pentium III.
11725 // // Conditional move for min
11726 //instruct cmovI_reg_lt( rRegI op2, rRegI op1, eFlagsReg cr ) %{
11727 //  effect( USE_DEF op2, USE op1, USE cr );
11728 //  format %{ "CMOVlt $op2,$op1\t! min" %}
11729 //  opcode(0x4C,0x0F);
11730 //  ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
11731 //  ins_pipe( pipe_cmov_reg );
11732 //%}
11733 //
11734 //// Min Register with Register (P6 version)
11735 //instruct minI_eReg_p6( rRegI op1, rRegI op2 ) %{
11736 //  predicate(VM_Version::supports_cmov() );
11737 //  match(Set op2 (MinI op1 op2));
11738 //  ins_cost(200);
11739 //  expand %{
11740 //    eFlagsReg cr;
11741 //    compI_eReg(cr,op1,op2);
11742 //    cmovI_reg_lt(op2,op1,cr);
11743 //  %}
11744 //%}
11745 
11746 // Min Register with Register (generic version)
11747 instruct minI_eReg(rRegI dst, rRegI src, eFlagsReg flags) %{
11748   match(Set dst (MinI dst src));
11749   effect(KILL flags);
11750   ins_cost(300);
11751 
11752   format %{ "MIN    $dst,$src" %}
11753   opcode(0xCC);
11754   ins_encode( min_enc(dst,src) );
11755   ins_pipe( pipe_slow );
11756 %}
11757 
11758 // Max Register with Register
11759 //   *** Min and Max using the conditional move are slower than the
11760 //   *** branch version on a Pentium III.
11761 // // Conditional move for max
11762 //instruct cmovI_reg_gt( rRegI op2, rRegI op1, eFlagsReg cr ) %{
11763 //  effect( USE_DEF op2, USE op1, USE cr );
11764 //  format %{ "CMOVgt $op2,$op1\t! max" %}
11765 //  opcode(0x4F,0x0F);
11766 //  ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
11767 //  ins_pipe( pipe_cmov_reg );
11768 //%}
11769 //
11770 // // Max Register with Register (P6 version)
11771 //instruct maxI_eReg_p6( rRegI op1, rRegI op2 ) %{
11772 //  predicate(VM_Version::supports_cmov() );
11773 //  match(Set op2 (MaxI op1 op2));
11774 //  ins_cost(200);
11775 //  expand %{
11776 //    eFlagsReg cr;
11777 //    compI_eReg(cr,op1,op2);
11778 //    cmovI_reg_gt(op2,op1,cr);
11779 //  %}
11780 //%}
11781 
11782 // Max Register with Register (generic version)
11783 instruct maxI_eReg(rRegI dst, rRegI src, eFlagsReg flags) %{
11784   match(Set dst (MaxI dst src));
11785   effect(KILL flags);
11786   ins_cost(300);
11787 
11788   format %{ "MAX    $dst,$src" %}
11789   opcode(0xCC);
11790   ins_encode( max_enc(dst,src) );
11791   ins_pipe( pipe_slow );
11792 %}
11793 
11794 // ============================================================================
// Counted Loop limit node which represents the exact final iterator value.
// Note: the resulting value should fit into the integer range since counted
// loops have a limit check on overflow.
11798 instruct loopLimit_eReg(eAXRegI limit, nadxRegI init, immI stride, eDXRegI limit_hi, nadxRegI tmp, eFlagsReg flags) %{
11799   match(Set limit (LoopLimit (Binary init limit) stride));
11800   effect(TEMP limit_hi, TEMP tmp, KILL flags);
11801   ins_cost(300);
11802 
11803   format %{ "loopLimit $init,$limit,$stride  # $limit = $init + $stride *( $limit - $init + $stride -1)/ $stride, kills $limit_hi" %}
11804   ins_encode %{
11805     int strd = (int)$stride$$constant;
11806     assert(strd != 1 && strd != -1, "sanity");
11807     int m1 = (strd > 0) ? 1 : -1;
    // Convert limit to long (EDX:EAX)
    __ cdql();
    // Convert init to long (tmp:init)
11811     __ movl($tmp$$Register, $init$$Register);
11812     __ sarl($tmp$$Register, 31);
11813     // $limit - $init
11814     __ subl($limit$$Register, $init$$Register);
11815     __ sbbl($limit_hi$$Register, $tmp$$Register);
11816     // + ($stride - 1)
11817     if (strd > 0) {
11818       __ addl($limit$$Register, (strd - 1));
11819       __ adcl($limit_hi$$Register, 0);
11820       __ movl($tmp$$Register, strd);
11821     } else {
11822       __ addl($limit$$Register, (strd + 1));
11823       __ adcl($limit_hi$$Register, -1);
11824       __ lneg($limit_hi$$Register, $limit$$Register);
11825       __ movl($tmp$$Register, -strd);
11826     }
    // signed division: (EDX:EAX) / pos_stride
11828     __ idivl($tmp$$Register);
11829     if (strd < 0) {
11830       // restore sign
11831       __ negl($tmp$$Register);
11832     }
11833     // (EAX) * stride
11834     __ mull($tmp$$Register);
11835     // + init (ignore upper bits)
11836     __ addl($limit$$Register, $init$$Register);
11837   %}
11838   ins_pipe( pipe_slow );
11839 %}
11840 
11841 // ============================================================================
11842 // Branch Instructions
11843 // Jump Table
11844 instruct jumpXtnd(rRegI switch_val) %{
11845   match(Jump switch_val);
11846   ins_cost(350);
11847   format %{  "JMP    [$constantaddress](,$switch_val,1)\n\t" %}
11848   ins_encode %{
11849     // Jump to Address(table_base + switch_reg)
11850     Address index(noreg, $switch_val$$Register, Address::times_1);
11851     __ jump(ArrayAddress($constantaddress, index));
11852   %}
11853   ins_pipe(pipe_jmp);
11854 %}
11855 
11856 // Jump Direct - Label defines a relative address from JMP+1
11857 instruct jmpDir(label labl) %{
11858   match(Goto);
11859   effect(USE labl);
11860 
11861   ins_cost(300);
11862   format %{ "JMP    $labl" %}
11863   size(5);
11864   ins_encode %{
11865     Label* L = $labl$$label;
11866     __ jmp(*L, false); // Always long jump
11867   %}
11868   ins_pipe( pipe_jmp );
11869 %}
11870 
11871 // Jump Direct Conditional - Label defines a relative address from Jcc+1
11872 instruct jmpCon(cmpOp cop, eFlagsReg cr, label labl) %{
11873   match(If cop cr);
11874   effect(USE labl);
11875 
11876   ins_cost(300);
11877   format %{ "J$cop    $labl" %}
11878   size(6);
11879   ins_encode %{
11880     Label* L = $labl$$label;
11881     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11882   %}
11883   ins_pipe( pipe_jcc );
11884 %}
11885 
11886 // Jump Direct Conditional - Label defines a relative address from Jcc+1
11887 instruct jmpLoopEnd(cmpOp cop, eFlagsReg cr, label labl) %{
11888   match(CountedLoopEnd cop cr);
11889   effect(USE labl);
11890 
11891   ins_cost(300);
11892   format %{ "J$cop    $labl\t# Loop end" %}
11893   size(6);
11894   ins_encode %{
11895     Label* L = $labl$$label;
11896     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11897   %}
11898   ins_pipe( pipe_jcc );
11899 %}
11900 
11901 // Jump Direct Conditional - Label defines a relative address from Jcc+1
11902 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
11903   match(CountedLoopEnd cop cmp);
11904   effect(USE labl);
11905 
11906   ins_cost(300);
11907   format %{ "J$cop,u  $labl\t# Loop end" %}
11908   size(6);
11909   ins_encode %{
11910     Label* L = $labl$$label;
11911     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11912   %}
11913   ins_pipe( pipe_jcc );
11914 %}
11915 
11916 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
11917   match(CountedLoopEnd cop cmp);
11918   effect(USE labl);
11919 
11920   ins_cost(200);
11921   format %{ "J$cop,u  $labl\t# Loop end" %}
11922   size(6);
11923   ins_encode %{
11924     Label* L = $labl$$label;
11925     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11926   %}
11927   ins_pipe( pipe_jcc );
11928 %}
11929 
11930 // Jump Direct Conditional - using unsigned comparison
11931 instruct jmpConU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
11932   match(If cop cmp);
11933   effect(USE labl);
11934 
11935   ins_cost(300);
11936   format %{ "J$cop,u  $labl" %}
11937   size(6);
11938   ins_encode %{
11939     Label* L = $labl$$label;
11940     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11941   %}
11942   ins_pipe(pipe_jcc);
11943 %}
11944 
11945 instruct jmpConUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
11946   match(If cop cmp);
11947   effect(USE labl);
11948 
11949   ins_cost(200);
11950   format %{ "J$cop,u  $labl" %}
11951   size(6);
11952   ins_encode %{
11953     Label* L = $labl$$label;
11954     __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
11955   %}
11956   ins_pipe(pipe_jcc);
11957 %}
11958 
11959 instruct jmpConUCF2(cmpOpUCF2 cop, eFlagsRegUCF cmp, label labl) %{
11960   match(If cop cmp);
11961   effect(USE labl);
11962 
11963   ins_cost(200);
11964   format %{ $$template
11965     if ($cop$$cmpcode == Assembler::notEqual) {
11966       $$emit$$"JP,u   $labl\n\t"
11967       $$emit$$"J$cop,u   $labl"
11968     } else {
11969       $$emit$$"JP,u   done\n\t"
11970       $$emit$$"J$cop,u   $labl\n\t"
11971       $$emit$$"done:"
11972     }
11973   %}
11974   ins_encode %{
11975     Label* l = $labl$$label;
11976     if ($cop$$cmpcode == Assembler::notEqual) {
11977       __ jcc(Assembler::parity, *l, false);
11978       __ jcc(Assembler::notEqual, *l, false);
11979     } else if ($cop$$cmpcode == Assembler::equal) {
11980       Label done;
11981       __ jccb(Assembler::parity, done);
11982       __ jcc(Assembler::equal, *l, false);
11983       __ bind(done);
11984     } else {
11985        ShouldNotReachHere();
11986     }
11987   %}
11988   ins_pipe(pipe_jcc);
11989 %}
11990 
11991 // ============================================================================
11992 // The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary superklass
11993 // array for an instance of the superklass.  Set a hidden internal cache on a
11994 // hit (cache is checked with exposed code in gen_subtype_check()).  Return
11995 // NZ for a miss or zero for a hit.  The encoding ALSO sets flags.
11996 instruct partialSubtypeCheck( eDIRegP result, eSIRegP sub, eAXRegP super, eCXRegI rcx, eFlagsReg cr ) %{
11997   match(Set result (PartialSubtypeCheck sub super));
11998   effect( KILL rcx, KILL cr );
11999 
12000   ins_cost(1100);  // slightly larger than the next version
12001   format %{ "MOV    EDI,[$sub+Klass::secondary_supers]\n\t"
12002             "MOV    ECX,[EDI+ArrayKlass::length]\t# length to scan\n\t"
12003             "ADD    EDI,ArrayKlass::base_offset\t# Skip to start of data; set NZ in case count is zero\n\t"
12004             "REPNE SCASD\t# Scan *EDI++ for a match with EAX while CX-- != 0\n\t"
12005             "JNE,s  miss\t\t# Missed: EDI not-zero\n\t"
12006             "MOV    [$sub+Klass::secondary_super_cache],$super\t# Hit: update cache\n\t"
12007             "XOR    $result,$result\t\t Hit: EDI zero\n\t"
12008      "miss:\t" %}
12009 
12010   opcode(0x1); // Force a XOR of EDI
12011   ins_encode( enc_PartialSubtypeCheck() );
12012   ins_pipe( pipe_slow );
12013 %}
12014 
12015 instruct partialSubtypeCheck_vs_Zero( eFlagsReg cr, eSIRegP sub, eAXRegP super, eCXRegI rcx, eDIRegP result, immP0 zero ) %{
12016   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
12017   effect( KILL rcx, KILL result );
12018 
12019   ins_cost(1000);
12020   format %{ "MOV    EDI,[$sub+Klass::secondary_supers]\n\t"
12021             "MOV    ECX,[EDI+ArrayKlass::length]\t# length to scan\n\t"
12022             "ADD    EDI,ArrayKlass::base_offset\t# Skip to start of data; set NZ in case count is zero\n\t"
12023             "REPNE SCASD\t# Scan *EDI++ for a match with EAX while CX-- != 0\n\t"
12024             "JNE,s  miss\t\t# Missed: flags NZ\n\t"
12025             "MOV    [$sub+Klass::secondary_super_cache],$super\t# Hit: update cache, flags Z\n\t"
12026      "miss:\t" %}
12027 
12028   opcode(0x0);  // No need to XOR EDI
12029   ins_encode( enc_PartialSubtypeCheck() );
12030   ins_pipe( pipe_slow );
12031 %}
12032 
12033 // ============================================================================
12034 // Branch Instructions -- short offset versions
12035 //
12036 // These instructions are used to replace jumps of a long offset (the default
12037 // match) with jumps of a shorter offset.  These instructions are all tagged
12038 // with the ins_short_branch attribute, which causes the ADLC to suppress the
12039 // match rules in general matching.  Instead, the ADLC generates a conversion
12040 // method in the MachNode which can be used to do in-place replacement of the
12041 // long variant with the shorter variant.  The compiler will determine if a
12042 // branch can be taken by the is_short_branch_offset() predicate in the machine
12043 // specific code section of the file.
12044 
12045 // Jump Direct - Label defines a relative address from JMP+1
12046 instruct jmpDir_short(label labl) %{
12047   match(Goto);
12048   effect(USE labl);
12049 
12050   ins_cost(300);
12051   format %{ "JMP,s  $labl" %}
12052   size(2);
12053   ins_encode %{
12054     Label* L = $labl$$label;
12055     __ jmpb(*L);
12056   %}
12057   ins_pipe( pipe_jmp );
12058   ins_short_branch(1);
12059 %}
12060 
12061 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12062 instruct jmpCon_short(cmpOp cop, eFlagsReg cr, label labl) %{
12063   match(If cop cr);
12064   effect(USE labl);
12065 
12066   ins_cost(300);
12067   format %{ "J$cop,s  $labl" %}
12068   size(2);
12069   ins_encode %{
12070     Label* L = $labl$$label;
12071     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12072   %}
12073   ins_pipe( pipe_jcc );
12074   ins_short_branch(1);
12075 %}
12076 
12077 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12078 instruct jmpLoopEnd_short(cmpOp cop, eFlagsReg cr, label labl) %{
12079   match(CountedLoopEnd cop cr);
12080   effect(USE labl);
12081 
12082   ins_cost(300);
12083   format %{ "J$cop,s  $labl\t# Loop end" %}
12084   size(2);
12085   ins_encode %{
12086     Label* L = $labl$$label;
12087     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12088   %}
12089   ins_pipe( pipe_jcc );
12090   ins_short_branch(1);
12091 %}
12092 
12093 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12094 instruct jmpLoopEndU_short(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12095   match(CountedLoopEnd cop cmp);
12096   effect(USE labl);
12097 
12098   ins_cost(300);
12099   format %{ "J$cop,us $labl\t# Loop end" %}
12100   size(2);
12101   ins_encode %{
12102     Label* L = $labl$$label;
12103     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12104   %}
12105   ins_pipe( pipe_jcc );
12106   ins_short_branch(1);
12107 %}
12108 
12109 instruct jmpLoopEndUCF_short(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12110   match(CountedLoopEnd cop cmp);
12111   effect(USE labl);
12112 
12113   ins_cost(300);
12114   format %{ "J$cop,us $labl\t# Loop end" %}
12115   size(2);
12116   ins_encode %{
12117     Label* L = $labl$$label;
12118     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12119   %}
12120   ins_pipe( pipe_jcc );
12121   ins_short_branch(1);
12122 %}
12123 
12124 // Jump Direct Conditional - using unsigned comparison
12125 instruct jmpConU_short(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12126   match(If cop cmp);
12127   effect(USE labl);
12128 
12129   ins_cost(300);
12130   format %{ "J$cop,us $labl" %}
12131   size(2);
12132   ins_encode %{
12133     Label* L = $labl$$label;
12134     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12135   %}
12136   ins_pipe( pipe_jcc );
12137   ins_short_branch(1);
12138 %}
12139 
12140 instruct jmpConUCF_short(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12141   match(If cop cmp);
12142   effect(USE labl);
12143 
12144   ins_cost(300);
12145   format %{ "J$cop,us $labl" %}
12146   size(2);
12147   ins_encode %{
12148     Label* L = $labl$$label;
12149     __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12150   %}
12151   ins_pipe( pipe_jcc );
12152   ins_short_branch(1);
12153 %}
12154 
12155 instruct jmpConUCF2_short(cmpOpUCF2 cop, eFlagsRegUCF cmp, label labl) %{
12156   match(If cop cmp);
12157   effect(USE labl);
12158 
12159   ins_cost(300);
12160   format %{ $$template
12161     if ($cop$$cmpcode == Assembler::notEqual) {
12162       $$emit$$"JP,u,s   $labl\n\t"
12163       $$emit$$"J$cop,u,s   $labl"
12164     } else {
12165       $$emit$$"JP,u,s   done\n\t"
12166       $$emit$$"J$cop,u,s  $labl\n\t"
12167       $$emit$$"done:"
12168     }
12169   %}
12170   size(4);
12171   ins_encode %{
12172     Label* l = $labl$$label;
12173     if ($cop$$cmpcode == Assembler::notEqual) {
12174       __ jccb(Assembler::parity, *l);
12175       __ jccb(Assembler::notEqual, *l);
12176     } else if ($cop$$cmpcode == Assembler::equal) {
12177       Label done;
12178       __ jccb(Assembler::parity, done);
12179       __ jccb(Assembler::equal, *l);
12180       __ bind(done);
12181     } else {
12182        ShouldNotReachHere();
12183     }
12184   %}
12185   ins_pipe(pipe_jcc);
12186   ins_short_branch(1);
12187 %}
12188 
12189 // ============================================================================
12190 // Long Compare
12191 //
12192 // Currently we hold longs in 2 registers.  Comparing such values efficiently
12193 // is tricky.  The flavor of compare used depends on whether we are testing
12194 // for LT, LE, or EQ.  For a simple LT test we can check just the sign bit.
// The GE test is the negated LT test.  The LE test is had by commuting the
// operands (yielding a GE test on the swapped operands); negating that gives
// the GT test.  The EQ test ORs the high and low halves and tests the
// resulting flags; the NE test is the negation of that.
12199 
12200 // Due to a shortcoming in the ADLC, it mixes up expressions like:
12201 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)).  Note the
12202 // difference between 'Y' and '0L'.  The tree-matches for the CmpI sections
12203 // are collapsed internally in the ADLC's dfa-gen code.  The match for
12204 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
12205 // foo match ends up with the wrong leaf.  One fix is to not match both
12206 // reg-reg and reg-zero forms of long-compare.  This is unfortunate because
12207 // both forms beat the trinary form of long-compare and both are very useful
12208 // on Intel which has so few registers.
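
// Illustrative sketch (not ADLC code): the LT/GE-only flags used below come
// from comparing the low halves and then subtracting the high halves with
// borrow.  In C, a signed 64-bit "x < y" becomes roughly
//
//   int borrow = ((unsigned)x_lo < (unsigned)y_lo);   // CMP  x.lo,y.lo
//   int diff   = x_hi - y_hi - borrow;                // SBB  tmp,y.hi
//   bool lt    = /* SF ^ OF after the SBB */;         // valid for LT/GE only,
//                                                     // never for EQ/NE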
12209 
12210 // Manifest a CmpL result in an integer register.  Very painful.
12211 // This is the test to avoid.
12212 instruct cmpL3_reg_reg(eSIRegI dst, eRegL src1, eRegL src2, eFlagsReg flags ) %{
12213   match(Set dst (CmpL3 src1 src2));
12214   effect( KILL flags );
12215   ins_cost(1000);
12216   format %{ "XOR    $dst,$dst\n\t"
12217             "CMP    $src1.hi,$src2.hi\n\t"
12218             "JLT,s  m_one\n\t"
12219             "JGT,s  p_one\n\t"
12220             "CMP    $src1.lo,$src2.lo\n\t"
12221             "JB,s   m_one\n\t"
12222             "JEQ,s  done\n"
12223     "p_one:\tINC    $dst\n\t"
12224             "JMP,s  done\n"
12225     "m_one:\tDEC    $dst\n"
12226      "done:" %}
12227   ins_encode %{
12228     Label p_one, m_one, done;
12229     __ xorptr($dst$$Register, $dst$$Register);
12230     __ cmpl(HIGH_FROM_LOW($src1$$Register), HIGH_FROM_LOW($src2$$Register));
12231     __ jccb(Assembler::less,    m_one);
12232     __ jccb(Assembler::greater, p_one);
12233     __ cmpl($src1$$Register, $src2$$Register);
12234     __ jccb(Assembler::below,   m_one);
12235     __ jccb(Assembler::equal,   done);
12236     __ bind(p_one);
12237     __ incrementl($dst$$Register);
12238     __ jmpb(done);
12239     __ bind(m_one);
12240     __ decrementl($dst$$Register);
12241     __ bind(done);
12242   %}
12243   ins_pipe( pipe_slow );
12244 %}
12245 
12246 //======
12247 // Manifest a CmpL result in the normal flags.  Only good for LT or GE
12248 // compares.  Can be used for LE or GT compares by reversing arguments.
12249 // NOT GOOD FOR EQ/NE tests.
12250 instruct cmpL_zero_flags_LTGE( flagsReg_long_LTGE flags, eRegL src, immL0 zero ) %{
12251   match( Set flags (CmpL src zero ));
12252   ins_cost(100);
12253   format %{ "TEST   $src.hi,$src.hi" %}
12254   opcode(0x85);
12255   ins_encode( OpcP, RegReg_Hi2( src, src ) );
12256   ins_pipe( ialu_cr_reg_reg );
12257 %}
12258 
12259 // Manifest a CmpL result in the normal flags.  Only good for LT or GE
12260 // compares.  Can be used for LE or GT compares by reversing arguments.
12261 // NOT GOOD FOR EQ/NE tests.
12262 instruct cmpL_reg_flags_LTGE( flagsReg_long_LTGE flags, eRegL src1, eRegL src2, rRegI tmp ) %{
12263   match( Set flags (CmpL src1 src2 ));
12264   effect( TEMP tmp );
12265   ins_cost(300);
12266   format %{ "CMP    $src1.lo,$src2.lo\t! Long compare; set flags for low bits\n\t"
12267             "MOV    $tmp,$src1.hi\n\t"
12268             "SBB    $tmp,$src2.hi\t! Compute flags for long compare" %}
12269   ins_encode( long_cmp_flags2( src1, src2, tmp ) );
12270   ins_pipe( ialu_cr_reg_reg );
12271 %}
12272 
// Long compares reg < zero/reg OR reg >= zero/reg.
12274 // Just a wrapper for a normal branch, plus the predicate test.
12275 instruct cmpL_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, label labl) %{
12276   match(If cmp flags);
12277   effect(USE labl);
12278   predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );
12279   expand %{
12280     jmpCon(cmp,flags,labl);    // JLT or JGE...
12281   %}
12282 %}
12283 
12284 // Compare 2 longs and CMOVE longs.
12285 instruct cmovLL_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, eRegL dst, eRegL src) %{
12286   match(Set dst (CMoveL (Binary cmp flags) (Binary dst src)));
12287   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ));
12288   ins_cost(400);
12289   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12290             "CMOV$cmp $dst.hi,$src.hi" %}
12291   opcode(0x0F,0x40);
12292   ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) );
12293   ins_pipe( pipe_cmov_reg_long );
12294 %}
12295 
12296 instruct cmovLL_mem_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, eRegL dst, load_long_memory src) %{
12297   match(Set dst (CMoveL (Binary cmp flags) (Binary dst (LoadL src))));
12298   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ));
12299   ins_cost(500);
12300   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12301             "CMOV$cmp $dst.hi,$src.hi" %}
12302   opcode(0x0F,0x40);
12303   ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) );
12304   ins_pipe( pipe_cmov_reg_long );
12305 %}
12306 
12307 // Compare 2 longs and CMOVE ints.
12308 instruct cmovII_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, rRegI dst, rRegI src) %{
12309   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ));
12310   match(Set dst (CMoveI (Binary cmp flags) (Binary dst src)));
12311   ins_cost(200);
12312   format %{ "CMOV$cmp $dst,$src" %}
12313   opcode(0x0F,0x40);
12314   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12315   ins_pipe( pipe_cmov_reg );
12316 %}
12317 
12318 instruct cmovII_mem_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, rRegI dst, memory src) %{
12319   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ));
12320   match(Set dst (CMoveI (Binary cmp flags) (Binary dst (LoadI src))));
12321   ins_cost(250);
12322   format %{ "CMOV$cmp $dst,$src" %}
12323   opcode(0x0F,0x40);
12324   ins_encode( enc_cmov(cmp), RegMem( dst, src ) );
12325   ins_pipe( pipe_cmov_mem );
12326 %}
12327 
// Compare 2 longs and CMOVE ptrs.
12329 instruct cmovPP_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, eRegP dst, eRegP src) %{
12330   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge ));
12331   match(Set dst (CMoveP (Binary cmp flags) (Binary dst src)));
12332   ins_cost(200);
12333   format %{ "CMOV$cmp $dst,$src" %}
12334   opcode(0x0F,0x40);
12335   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12336   ins_pipe( pipe_cmov_reg );
12337 %}
12338 
12339 // Compare 2 longs and CMOVE doubles
12340 instruct cmovDDPR_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, regDPR dst, regDPR src) %{
12341   predicate( UseSSE<=1 && _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );
12342   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12343   ins_cost(200);
12344   expand %{
12345     fcmovDPR_regS(cmp,flags,dst,src);
12346   %}
12347 %}
12348 
12349 // Compare 2 longs and CMOVE doubles
12350 instruct cmovDD_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, regD dst, regD src) %{
12351   predicate( UseSSE>=2 && _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );
12352   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12353   ins_cost(200);
12354   expand %{
12355     fcmovD_regS(cmp,flags,dst,src);
12356   %}
12357 %}
12358 
12359 instruct cmovFFPR_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, regFPR dst, regFPR src) %{
12360   predicate( UseSSE==0 && _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );
12361   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12362   ins_cost(200);
12363   expand %{
12364     fcmovFPR_regS(cmp,flags,dst,src);
12365   %}
12366 %}
12367 
12368 instruct cmovFF_reg_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, regF dst, regF src) %{
12369   predicate( UseSSE>=1 && _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );
12370   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12371   ins_cost(200);
12372   expand %{
12373     fcmovF_regS(cmp,flags,dst,src);
12374   %}
12375 %}
12376 
12377 //======
12378 // Manifest a CmpL result in the normal flags.  Only good for EQ/NE compares.
12379 instruct cmpL_zero_flags_EQNE( flagsReg_long_EQNE flags, eRegL src, immL0 zero, rRegI tmp ) %{
12380   match( Set flags (CmpL src zero ));
12381   effect(TEMP tmp);
12382   ins_cost(200);
12383   format %{ "MOV    $tmp,$src.lo\n\t"
12384             "OR     $tmp,$src.hi\t! Long is EQ/NE 0?" %}
12385   ins_encode( long_cmp_flags0( src, tmp ) );
12386   ins_pipe( ialu_reg_reg_long );
12387 %}
12388 
12389 // Manifest a CmpL result in the normal flags.  Only good for EQ/NE compares.
12390 instruct cmpL_reg_flags_EQNE( flagsReg_long_EQNE flags, eRegL src1, eRegL src2 ) %{
12391   match( Set flags (CmpL src1 src2 ));
12392   ins_cost(200+300);
12393   format %{ "CMP    $src1.lo,$src2.lo\t! Long compare; set flags for low bits\n\t"
12394             "JNE,s  skip\n\t"
12395             "CMP    $src1.hi,$src2.hi\n\t"
12396      "skip:\t" %}
12397   ins_encode( long_cmp_flags1( src1, src2 ) );
12398   ins_pipe( ialu_cr_reg_reg );
12399 %}
12400 
12401 // Long compare reg == zero/reg OR reg != zero/reg
12402 // Just a wrapper for a normal branch, plus the predicate test.
12403 instruct cmpL_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, label labl) %{
12404   match(If cmp flags);
12405   effect(USE labl);
12406   predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );
12407   expand %{
12408     jmpCon(cmp,flags,labl);    // JEQ or JNE...
12409   %}
12410 %}
12411 
12412 // Compare 2 longs and CMOVE longs.
12413 instruct cmovLL_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, eRegL dst, eRegL src) %{
12414   match(Set dst (CMoveL (Binary cmp flags) (Binary dst src)));
12415   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ));
12416   ins_cost(400);
12417   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12418             "CMOV$cmp $dst.hi,$src.hi" %}
12419   opcode(0x0F,0x40);
12420   ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) );
12421   ins_pipe( pipe_cmov_reg_long );
12422 %}
12423 
12424 instruct cmovLL_mem_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, eRegL dst, load_long_memory src) %{
12425   match(Set dst (CMoveL (Binary cmp flags) (Binary dst (LoadL src))));
12426   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ));
12427   ins_cost(500);
12428   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12429             "CMOV$cmp $dst.hi,$src.hi" %}
12430   opcode(0x0F,0x40);
12431   ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) );
12432   ins_pipe( pipe_cmov_reg_long );
12433 %}
12434 
12435 // Compare 2 longs and CMOVE ints.
12436 instruct cmovII_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, rRegI dst, rRegI src) %{
12437   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ));
12438   match(Set dst (CMoveI (Binary cmp flags) (Binary dst src)));
12439   ins_cost(200);
12440   format %{ "CMOV$cmp $dst,$src" %}
12441   opcode(0x0F,0x40);
12442   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12443   ins_pipe( pipe_cmov_reg );
12444 %}
12445 
12446 instruct cmovII_mem_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, rRegI dst, memory src) %{
12447   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ));
12448   match(Set dst (CMoveI (Binary cmp flags) (Binary dst (LoadI src))));
12449   ins_cost(250);
12450   format %{ "CMOV$cmp $dst,$src" %}
12451   opcode(0x0F,0x40);
12452   ins_encode( enc_cmov(cmp), RegMem( dst, src ) );
12453   ins_pipe( pipe_cmov_mem );
12454 %}
12455 
// Compare 2 longs and CMOVE ptrs.
12457 instruct cmovPP_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, eRegP dst, eRegP src) %{
12458   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ));
12459   match(Set dst (CMoveP (Binary cmp flags) (Binary dst src)));
12460   ins_cost(200);
12461   format %{ "CMOV$cmp $dst,$src" %}
12462   opcode(0x0F,0x40);
12463   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12464   ins_pipe( pipe_cmov_reg );
12465 %}
12466 
12467 // Compare 2 longs and CMOVE doubles
12468 instruct cmovDDPR_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, regDPR dst, regDPR src) %{
12469   predicate( UseSSE<=1 && _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );
12470   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12471   ins_cost(200);
12472   expand %{
12473     fcmovDPR_regS(cmp,flags,dst,src);
12474   %}
12475 %}
12476 
12477 // Compare 2 longs and CMOVE doubles
12478 instruct cmovDD_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, regD dst, regD src) %{
12479   predicate( UseSSE>=2 && _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );
12480   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12481   ins_cost(200);
12482   expand %{
12483     fcmovD_regS(cmp,flags,dst,src);
12484   %}
12485 %}
12486 
12487 instruct cmovFFPR_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, regFPR dst, regFPR src) %{
12488   predicate( UseSSE==0 && (_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne) );
12489   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12490   ins_cost(200);
12491   expand %{
12492     fcmovFPR_regS(cmp,flags,dst,src);
12493   %}
12494 %}
12495 
12496 instruct cmovFF_reg_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, regF dst, regF src) %{
12497   predicate( UseSSE>=1 && (_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne) );
12498   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12499   ins_cost(200);
12500   expand %{
12501     fcmovF_regS(cmp,flags,dst,src);
12502   %}
12503 %}
12504 
12505 //======
12506 // Manifest a CmpL result in the normal flags.  Only good for LE or GT compares.
12507 // Same as cmpL_reg_flags_LEGT except must negate src
12508 instruct cmpL_zero_flags_LEGT( flagsReg_long_LEGT flags, eRegL src, immL0 zero, rRegI tmp ) %{
12509   match( Set flags (CmpL src zero ));
12510   effect( TEMP tmp );
12511   ins_cost(300);
12512   format %{ "XOR    $tmp,$tmp\t# Long compare for -$src < 0, use commuted test\n\t"
12513             "CMP    $tmp,$src.lo\n\t"
12514             "SBB    $tmp,$src.hi\n\t" %}
12515   ins_encode( long_cmp_flags3(src, tmp) );
12516   ins_pipe( ialu_reg_reg_long );
12517 %}
12518 
12519 // Manifest a CmpL result in the normal flags.  Only good for LE or GT compares.
12520 // Same as cmpL_reg_flags_LTGE except operands swapped.  Swapping operands
12521 // requires a commuted test to get the same result.
12522 instruct cmpL_reg_flags_LEGT( flagsReg_long_LEGT flags, eRegL src1, eRegL src2, rRegI tmp ) %{
12523   match( Set flags (CmpL src1 src2 ));
12524   effect( TEMP tmp );
12525   ins_cost(300);
12526   format %{ "CMP    $src2.lo,$src1.lo\t! Long compare, swapped operands, use with commuted test\n\t"
12527             "MOV    $tmp,$src2.hi\n\t"
12528             "SBB    $tmp,$src1.hi\t! Compute flags for long compare" %}
12529   ins_encode( long_cmp_flags2( src2, src1, tmp ) );
12530   ins_pipe( ialu_cr_reg_reg );
12531 %}
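// Informal sketch of the flag trick used above (the exact encodings live in
// long_cmp_flags2/long_cmp_flags3):
//   CMP  src2.lo,src1.lo    // CF = borrow out of the low halves
//   MOV  tmp,src2.hi
//   SBB  tmp,src1.hi        // flags now describe (src2 - src1) as a 64-bit signed value
// Because the operands were swapped, the branch/cmov condition must be
// commuted: src1 > src2 tests as "less", src1 <= src2 as "greater_equal",
// which is what the cmpOp_commute operand below provides.  ZF only reflects
// the high-half subtraction, so EQ/NE cannot be read from these flags; that
// case uses the flagsReg_long_EQNE rules earlier in this file.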
12532 
12533 // Long compares reg <= zero/reg OR reg > zero/reg.
12534 // Just a wrapper for a normal branch, plus the predicate test
12535 instruct cmpL_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, label labl) %{
12536   match(If cmp flags);
12537   effect(USE labl);
12538   predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le );
12539   ins_cost(300);
12540   expand %{
12541     jmpCon(cmp,flags,labl);    // JGT or JLE...
12542   %}
12543 %}
12544 
12545 // Compare 2 longs and CMOVE longs.
12546 instruct cmovLL_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, eRegL dst, eRegL src) %{
12547   match(Set dst (CMoveL (Binary cmp flags) (Binary dst src)));
12548   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ));
12549   ins_cost(400);
12550   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12551             "CMOV$cmp $dst.hi,$src.hi" %}
12552   opcode(0x0F,0x40);
12553   ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) );
12554   ins_pipe( pipe_cmov_reg_long );
12555 %}
12556 
12557 instruct cmovLL_mem_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, eRegL dst, load_long_memory src) %{
12558   match(Set dst (CMoveL (Binary cmp flags) (Binary dst (LoadL src))));
12559   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ));
12560   ins_cost(500);
12561   format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
12562             "CMOV$cmp $dst.hi,$src.hi+4" %}
12563   opcode(0x0F,0x40);
12564   ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) );
12565   ins_pipe( pipe_cmov_reg_long );
12566 %}
12567 
12568 // Compare 2 longs and CMOVE ints.
12569 instruct cmovII_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, rRegI dst, rRegI src) %{
12570   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ));
12571   match(Set dst (CMoveI (Binary cmp flags) (Binary dst src)));
12572   ins_cost(200);
12573   format %{ "CMOV$cmp $dst,$src" %}
12574   opcode(0x0F,0x40);
12575   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12576   ins_pipe( pipe_cmov_reg );
12577 %}
12578 
12579 instruct cmovII_mem_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, rRegI dst, memory src) %{
12580   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ));
12581   match(Set dst (CMoveI (Binary cmp flags) (Binary dst (LoadI src))));
12582   ins_cost(250);
12583   format %{ "CMOV$cmp $dst,$src" %}
12584   opcode(0x0F,0x40);
12585   ins_encode( enc_cmov(cmp), RegMem( dst, src ) );
12586   ins_pipe( pipe_cmov_mem );
12587 %}
12588 
12589 // Compare 2 longs and CMOVE ptrs.
12590 instruct cmovPP_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, eRegP dst, eRegP src) %{
12591   predicate(VM_Version::supports_cmov() && ( _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt ));
12592   match(Set dst (CMoveP (Binary cmp flags) (Binary dst src)));
12593   ins_cost(200);
12594   format %{ "CMOV$cmp $dst,$src" %}
12595   opcode(0x0F,0x40);
12596   ins_encode( enc_cmov(cmp), RegReg( dst, src ) );
12597   ins_pipe( pipe_cmov_reg );
12598 %}
12599 
12600 // Compare 2 longs and CMOVE doubles
12601 instruct cmovDDPR_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regDPR dst, regDPR src) %{
12602   predicate( UseSSE<=1 && (_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt) );
12603   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12604   ins_cost(200);
12605   expand %{
12606     fcmovDPR_regS(cmp,flags,dst,src);
12607   %}
12608 %}
12609 
12610 // Compare 2 longs and CMOVE doubles
12611 instruct cmovDD_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regD dst, regD src) %{
12612   predicate( UseSSE>=2 && (_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt) );
12613   match(Set dst (CMoveD (Binary cmp flags) (Binary dst src)));
12614   ins_cost(200);
12615   expand %{
12616     fcmovD_regS(cmp,flags,dst,src);
12617   %}
12618 %}
12619 
12620 instruct cmovFFPR_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regFPR dst, regFPR src) %{
12621   predicate( UseSSE==0 && (_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt) );
12622   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12623   ins_cost(200);
12624   expand %{
12625     fcmovFPR_regS(cmp,flags,dst,src);
12626   %}
12627 %}
12628 
12629 
12630 instruct cmovFF_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regF dst, regF src) %{
12631   predicate( UseSSE>=1 && (_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt) );
12632   match(Set dst (CMoveF (Binary cmp flags) (Binary dst src)));
12633   ins_cost(200);
12634   expand %{
12635     fcmovF_regS(cmp,flags,dst,src);
12636   %}
12637 %}
12638 
12639 
12640 // ============================================================================
12641 // Procedure Call/Return Instructions
12642 // Call Java Static Instruction
12643 // Note: If this code changes, the corresponding ret_addr_offset() and
12644 //       compute_padding() functions will have to be adjusted.
12645 instruct CallStaticJavaDirect(method meth) %{
12646   match(CallStaticJava);
12647   effect(USE meth);
12648 
12649   ins_cost(300);
12650   format %{ "CALL,static " %}
12651   opcode(0xE8); /* E8 cd */
12652   ins_encode( pre_call_resets,
12653               Java_Static_Call( meth ),
12654               call_epilog,
12655               post_call_FPU );
12656   ins_pipe( pipe_slow );
12657   ins_alignment(4);
12658 %}
12659 
12660 // Call Java Dynamic Instruction
12661 // Note: If this code changes, the corresponding ret_addr_offset() and
12662 //       compute_padding() functions will have to be adjusted.
12663 instruct CallDynamicJavaDirect(method meth) %{
12664   match(CallDynamicJava);
12665   effect(USE meth);
12666 
12667   ins_cost(300);
12668   format %{ "MOV    EAX,(oop)-1\n\t"
12669             "CALL,dynamic" %}
12670   opcode(0xE8); /* E8 cd */
12671   ins_encode( pre_call_resets,
12672               Java_Dynamic_Call( meth ),
12673               call_epilog,
12674               post_call_FPU );
12675   ins_pipe( pipe_slow );
12676   ins_alignment(4);
12677 %}
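// Note on the two Java call rules above: both request ins_alignment(4), and the
// ret_addr_offset()/compute_padding() warnings exist because those functions
// assume this exact code shape.  The alignment is presumably what keeps the
// 4-byte call displacement naturally aligned so the call site can be patched
// atomically when it is resolved or re-bound.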
12678 
12679 // Call Runtime Instruction
12680 instruct CallRuntimeDirect(method meth) %{
12681   match(CallRuntime);
12682   effect(USE meth);
12683 
12684   ins_cost(300);
12685   format %{ "CALL,runtime " %}
12686   opcode(0xE8); /* E8 cd */
12687   // Use FFREEs to clear entries in float stack
12688   ins_encode( pre_call_resets,
12689               FFree_Float_Stack_All,
12690               Java_To_Runtime( meth ),
12691               post_call_FPU );
12692   ins_pipe( pipe_slow );
12693 %}
12694 
12695 // Call runtime without safepoint
12696 instruct CallLeafDirect(method meth) %{
12697   match(CallLeaf);
12698   effect(USE meth);
12699 
12700   ins_cost(300);
12701   format %{ "CALL_LEAF,runtime " %}
12702   opcode(0xE8); /* E8 cd */
12703   ins_encode( pre_call_resets,
12704               FFree_Float_Stack_All,
12705               Java_To_Runtime( meth ),
12706               Verify_FPU_For_Leaf, post_call_FPU );
12707   ins_pipe( pipe_slow );
12708 %}
12709 
12710 instruct CallLeafNoFPDirect(method meth) %{
12711   match(CallLeafNoFP);
12712   effect(USE meth);
12713 
12714   ins_cost(300);
12715   format %{ "CALL_LEAF_NOFP,runtime " %}
12716   opcode(0xE8); /* E8 cd */
12717   ins_encode(Java_To_Runtime(meth));
12718   ins_pipe( pipe_slow );
12719 %}
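// Note: CallLeafNoFPDirect deliberately skips the FPU bookkeeping
// (pre_call_resets / FFree_Float_Stack_All / post_call_FPU) that the other
// call rules perform; CallLeafNoFP is only selected for leaf runtime calls
// that do not involve floating point.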
12720 
12721 
12722 // Return Instruction
12723 // Remove the return address & jump to it.
12724 instruct Ret() %{
12725   match(Return);
12726   format %{ "RET" %}
12727   opcode(0xC3);
12728   ins_encode(OpcP);
12729   ins_pipe( pipe_jmp );
12730 %}
12731 
12732 // Tail Call; Jump from runtime stub to Java code.
12733 // Also known as an 'interprocedural jump'.
12734 // Target of jump will eventually return to caller.
12735 // TailJump below removes the return address.
12736 instruct TailCalljmpInd(eRegP_no_EBP jump_target, eBXRegP method_oop) %{
12737   match(TailCall jump_target method_oop );
12738   ins_cost(300);
12739   format %{ "JMP    $jump_target \t# EBX holds method oop" %}
12740   opcode(0xFF, 0x4);  /* Opcode FF /4 */
12741   ins_encode( OpcP, RegOpc(jump_target) );
12742   ins_pipe( pipe_jmp );
12743 %}
12744 
12745 
12746 // Tail Jump; remove the return address; jump to target.
12747 // TailCall above leaves the return address around.
12748 instruct tailjmpInd(eRegP_no_EBP jump_target, eAXRegP ex_oop) %{
12749   match( TailJump jump_target ex_oop );
12750   ins_cost(300);
12751   format %{ "POP    EDX\t# pop return address into dummy\n\t"
12752             "JMP    $jump_target " %}
12753   opcode(0xFF, 0x4);  /* Opcode FF /4 */
12754   ins_encode( enc_pop_rdx,
12755               OpcP, RegOpc(jump_target) );
12756   ins_pipe( pipe_jmp );
12757 %}
12758 
12759 // Create exception oop: created by stack-crawling runtime code.
12760 // Created exception is now available to this handler, and is setup
12761 // just prior to jumping to this handler.  No code emitted.
12762 instruct CreateException( eAXRegP ex_oop )
12763 %{
12764   match(Set ex_oop (CreateEx));
12765 
12766   size(0);
12767   // use the following format syntax
12768   format %{ "# exception oop is in EAX; no code emitted" %}
12769   ins_encode();
12770   ins_pipe( empty );
12771 %}
12772 
12773 
12774 // Rethrow exception:
12775 // The exception oop will come in the first argument position.
12776 // Then JUMP (not call) to the rethrow stub code.
12777 instruct RethrowException()
12778 %{
12779   match(Rethrow);
12780 
12781   // use the following format syntax
12782   format %{ "JMP    rethrow_stub" %}
12783   ins_encode(enc_rethrow);
12784   ins_pipe( pipe_jmp );
12785 %}
12786 
12787 // inlined locking and unlocking
12788 
12789 instruct cmpFastLockRTM(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eDXRegI scr, rRegI cx1, rRegI cx2) %{
12790   predicate(Compile::current()->use_rtm());
12791   match(Set cr (FastLock object box));
12792   effect(TEMP tmp, TEMP scr, TEMP cx1, TEMP cx2, USE_KILL box);
12793   ins_cost(300);
12794   format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr,$cx1,$cx2" %}
12795   ins_encode %{
12796     __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
12797                  $scr$$Register, $cx1$$Register, $cx2$$Register,
12798                  _counters, _rtm_counters, _stack_rtm_counters,
12799                  ((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
12800                  true, ra_->C->profile_rtm());
12801   %}
12802   ins_pipe(pipe_slow);
12803 %}
12804 
12805 instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
12806   predicate(!Compile::current()->use_rtm());
12807   match(Set cr (FastLock object box));
12808   effect(TEMP tmp, TEMP scr, USE_KILL box);
12809   ins_cost(300);
12810   format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
12811   ins_encode %{
12812     __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
12813                  $scr$$Register, noreg, noreg, _counters, NULL, NULL, NULL, false, false);
12814   %}
12815   ins_pipe(pipe_slow);
12816 %}
12817 
12818 instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
12819   match(Set cr (FastUnlock object box));
12820   effect(TEMP tmp, USE_KILL box);
12821   ins_cost(300);
12822   format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
12823   ins_encode %{
12824     __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, ra_->C->use_rtm());
12825   %}
12826   ins_pipe(pipe_slow);
12827 %}
12828 
12829 
12830 
12831 // ============================================================================
12832 // Safepoint Instruction
12833 instruct safePoint_poll(eFlagsReg cr) %{
12834   match(SafePoint);
12835   effect(KILL cr);
12836 
12837   // TODO-FIXME: we currently poll at offset 0 of the safepoint polling page.
12838   // On SPARC that might be acceptable as we can generate the address with
12839   // just a sethi, saving an or.  By polling at offset 0 we can end up
12840   // putting additional pressure on the index-0 in the D$.  Because of
12841   // alignment (just like the situation at hand) the lower indices tend
12842   // to see more traffic.  It'd be better to change the polling address
12843   // to offset 0 of the last $line in the polling page.
12844 
12845   format %{ "TSTL   #polladdr,EAX\t! Safepoint: poll for GC" %}
12846   ins_cost(125);
12847   size(6);
12848   ins_encode( Safepoint_Poll() );
12849   ins_pipe( ialu_reg_mem );
12850 %}
12851 
12852 
12853 // ============================================================================
12854 // This name is KNOWN by the ADLC and cannot be changed.
12855 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
12856 // for this guy.
12857 instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
12858   match(Set dst (ThreadLocal));
12859   effect(DEF dst, KILL cr);
12860 
12861   format %{ "MOV    $dst, Thread::current()" %}
12862   ins_encode %{
12863     Register dstReg = as_Register($dst$$reg);
12864     __ get_thread(dstReg);
12865   %}
12866   ins_pipe( ialu_reg_fat );
12867 %}
12868 
12869 
12870 
12871 //----------PEEPHOLE RULES-----------------------------------------------------
12872 // These must follow all instruction definitions as they use the names
12873 // defined in the instructions definitions.
12874 //
12875 // peepmatch ( root_instr_name [preceding_instruction]* );
12876 //
12877 // peepconstraint %{
12878 // (instruction_number.operand_name relational_op instruction_number.operand_name
12879 //  [, ...] );
12880 // // instruction numbers are zero-based using left to right order in peepmatch
12881 //
12882 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
12883 // // provide an instruction_number.operand_name for each operand that appears
12884 // // in the replacement instruction's match rule
12885 //
12886 // ---------VM FLAGS---------------------------------------------------------
12887 //
12888 // All peephole optimizations can be turned off using -XX:-OptoPeephole
12889 //
12890 // Each peephole rule is given an identifying number starting with zero and
12891 // increasing by one in the order seen by the parser.  An individual peephole
12892 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
12893 // on the command-line.
12894 //
12895 // ---------CURRENT LIMITATIONS----------------------------------------------
12896 //
12897 // Only match adjacent instructions in same basic block
12898 // Only equality constraints
12899 // Only constraints between operands, not (0.dest_reg == EAX_enc)
12900 // Only one replacement instruction
12901 //
12902 // ---------EXAMPLE----------------------------------------------------------
12903 //
12904 // // pertinent parts of existing instructions in architecture description
12905 // instruct movI(rRegI dst, rRegI src) %{
12906 //   match(Set dst (CopyI src));
12907 // %}
12908 //
12909 // instruct incI_eReg(rRegI dst, immI1 src, eFlagsReg cr) %{
12910 //   match(Set dst (AddI dst src));
12911 //   effect(KILL cr);
12912 // %}
12913 //
12914 // // Change (inc mov) to lea
12915 // peephole %{
12916 //   // increment preceded by register-register move
12917 //   peepmatch ( incI_eReg movI );
12918 //   // require that the destination register of the increment
12919 //   // match the destination register of the move
12920 //   peepconstraint ( 0.dst == 1.dst );
12921 //   // construct a replacement instruction that sets
12922 //   // the destination to ( move's source register + one )
12923 //   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
12924 // %}
12925 //
12926 // Implementation no longer uses movX instructions since
12927 // machine-independent system no longer uses CopyX nodes.
12928 //
12929 // peephole %{
12930 //   peepmatch ( incI_eReg movI );
12931 //   peepconstraint ( 0.dst == 1.dst );
12932 //   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
12933 // %}
12934 //
12935 // peephole %{
12936 //   peepmatch ( decI_eReg movI );
12937 //   peepconstraint ( 0.dst == 1.dst );
12938 //   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
12939 // %}
12940 //
12941 // peephole %{
12942 //   peepmatch ( addI_eReg_imm movI );
12943 //   peepconstraint ( 0.dst == 1.dst );
12944 //   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
12945 // %}
12946 //
12947 // peephole %{
12948 //   peepmatch ( addP_eReg_imm movP );
12949 //   peepconstraint ( 0.dst == 1.dst );
12950 //   peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
12951 // %}
12952 
12953 // // Change load of spilled value to only a spill
12954 // instruct storeI(memory mem, rRegI src) %{
12955 //   match(Set mem (StoreI mem src));
12956 // %}
12957 //
12958 // instruct loadI(rRegI dst, memory mem) %{
12959 //   match(Set dst (LoadI mem));
12960 // %}
12961 //
12962 peephole %{
12963   peepmatch ( loadI storeI );
12964   peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
12965   peepreplace ( storeI( 1.mem 1.mem 1.src ) );
12966 %}
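// Example of exercising the flags described in the VM FLAGS notes above
// (illustrative only; rule ids follow parse order, so the id of the rule
// above depends on how many rules precede it):
//   -XX:-OptoPeephole        disable all peephole rules
//   -XX:OptoPeepholeAt=<id>  enable only the rule with the given id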
12967 
12968 //----------SMARTSPILL RULES---------------------------------------------------
12969 // These must follow all instruction definitions as they use the names
12970 // defined in the instructions definitions.