src/cpu/x86/vm/x86_32.ad
rev 8344 : 8076276: Add support for AVX512
Reviewed-by: kvn, roland
Contributed-by: michael.c.berg@intel.com

----- Old version -----

  84 // the stack will not have this element so FPR1 == st(0) from the
  85 // oopMap viewpoint. This same weirdness with numbering causes
  86 // instruction encoding to have to play games with the register
  87 // encode to correct for this 0/1 issue. See MachSpillCopyNode::implementation
  88 // where it does flt->flt moves to see an example
  89 //
  90 reg_def FPR1L( SOC, SOC, Op_RegF, 1, as_FloatRegister(0)->as_VMReg());
  91 reg_def FPR1H( SOC, SOC, Op_RegF, 1, as_FloatRegister(0)->as_VMReg()->next());
  92 reg_def FPR2L( SOC, SOC, Op_RegF, 2, as_FloatRegister(1)->as_VMReg());
  93 reg_def FPR2H( SOC, SOC, Op_RegF, 2, as_FloatRegister(1)->as_VMReg()->next());
  94 reg_def FPR3L( SOC, SOC, Op_RegF, 3, as_FloatRegister(2)->as_VMReg());
  95 reg_def FPR3H( SOC, SOC, Op_RegF, 3, as_FloatRegister(2)->as_VMReg()->next());
  96 reg_def FPR4L( SOC, SOC, Op_RegF, 4, as_FloatRegister(3)->as_VMReg());
  97 reg_def FPR4H( SOC, SOC, Op_RegF, 4, as_FloatRegister(3)->as_VMReg()->next());
  98 reg_def FPR5L( SOC, SOC, Op_RegF, 5, as_FloatRegister(4)->as_VMReg());
  99 reg_def FPR5H( SOC, SOC, Op_RegF, 5, as_FloatRegister(4)->as_VMReg()->next());
 100 reg_def FPR6L( SOC, SOC, Op_RegF, 6, as_FloatRegister(5)->as_VMReg());
 101 reg_def FPR6H( SOC, SOC, Op_RegF, 6, as_FloatRegister(5)->as_VMReg()->next());
 102 reg_def FPR7L( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg());
 103 reg_def FPR7H( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next());
 104 
 105 // Specify priority of register selection within phases of register
 106 // allocation.  Highest priority is first.  A useful heuristic is to
 107 // give registers a low priority when they are required by machine
 108 // instructions, like EAX and EDX.  Registers which are used as
 109 // pairs must fall on an even boundary (witness the FPR#L's in this list).
 110 // For the Intel integer registers, the equivalent Long pairs are
 111 // EDX:EAX, EBX:ECX, and EDI:EBP.
 112 alloc_class chunk0( ECX,   EBX,   EBP,   EDI,   EAX,   EDX,   ESI, ESP,
 113                     FPR0L, FPR0H, FPR1L, FPR1H, FPR2L, FPR2H,
 114                     FPR3L, FPR3H, FPR4L, FPR4H, FPR5L, FPR5H,
 115                     FPR6L, FPR6H, FPR7L, FPR7H );
 116 
 117 
 118 //----------Architecture Description Register Classes--------------------------
 119 // Several register classes are automatically defined based upon information in
 120 // this architecture description.
 121 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 122 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 123 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 124 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 125 //
 126 // Class for no registers (empty set).
 127 reg_class no_reg();
 128 
 129 // Class for all registers (including EBP)
 130 reg_class any_reg_with_ebp(EAX, EDX, EBP, EDI, ESI, ECX, EBX, ESP);
 131 // Class for all registers (excluding EBP)
 132 reg_class any_reg_no_ebp(EAX, EDX, EDI, ESI, ECX, EBX, ESP);
 133 // Dynamic register class that selects at runtime between register classes
 134 // any_reg and any_no_ebp_reg (depending on the value of the flag PreserveFramePointer). 
 135 // Equivalent to: return PreserveFramePointer ? any_no_ebp_reg : any_reg;


 262   return operand;
 263 }
 264 
 265 // Buffer for 128-bit masks used by SSE instructions.
 266 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
 267 
 268 // Static initialization during VM startup.
 269 static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
 270 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
 271 static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
 272 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
 273 
 274 // Offset hacking within calls.
 275 static int pre_call_resets_size() {
 276   int size = 0;
 277   Compile* C = Compile::current();
 278   if (C->in_24_bit_fp_mode()) {
 279     size += 6; // fldcw
 280   }
 281   if (C->max_vector_size() > 16) {
 282     size += 3; // vzeroupper
 283   }
 284   return size;
 285 }
 286 
 287 // !!!!! Special hack to get all types of calls to specify the byte offset
 288 //       from the start of the call to the point where the return address
 289 //       will point.
 290 int MachCallStaticJavaNode::ret_addr_offset() {
 291   return 5 + pre_call_resets_size();  // 5 bytes from start of call to where return address points  
 292 }
 293 
 294 int MachCallDynamicJavaNode::ret_addr_offset() {
 295   return 10 + pre_call_resets_size();  // 10 bytes from start of call to where return address points
 296 }
 297 
 298 static int sizeof_FFree_Float_Stack_All = -1;
 299 
 300 int MachCallRuntimeNode::ret_addr_offset() {
 301   assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
 302   return sizeof_FFree_Float_Stack_All + 5 + pre_call_resets_size();
 303 }


 750     emit_opcode  (*cbuf, opcode );
 751     encode_RegMem(*cbuf, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, relocInfo::none);
 752 #ifndef PRODUCT
 753   } else if( !do_size ) {
 754     if( size != 0 ) st->print("\n\t");
 755     if( opcode == 0x8B || opcode == 0x89 ) { // MOV
 756       if( is_load ) st->print("%s   %s,[ESP + #%d]",op_str,Matcher::regName[reg],offset);
 757       else          st->print("%s   [ESP + #%d],%s",op_str,offset,Matcher::regName[reg]);
 758     } else { // FLD, FST, PUSH, POP
 759       st->print("%s [ESP + #%d]",op_str,offset);
 760     }
 761 #endif
 762   }
 763   int offset_size = (offset == 0) ? 0 : ((offset <= 127) ? 1 : 4);
 764   return size+3+offset_size;
 765 }
 766 
 767 // Helper for XMM registers.  Extra opcode bits, limited syntax.
 768 static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
 769                          int offset, int reg_lo, int reg_hi, int size, outputStream* st ) {
 770   if (cbuf) {
 771     MacroAssembler _masm(cbuf);
 772     if (reg_lo+1 == reg_hi) { // double move?
 773       if (is_load) {
 774         __ movdbl(as_XMMRegister(Matcher::_regEncode[reg_lo]), Address(rsp, offset));
 775       } else {
 776         __ movdbl(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[reg_lo]));
 777       }
 778     } else {
 779       if (is_load) {
 780         __ movflt(as_XMMRegister(Matcher::_regEncode[reg_lo]), Address(rsp, offset));
 781       } else {
 782         __ movflt(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[reg_lo]));
 783       }
 784     }
 785 #ifndef PRODUCT
 786   } else if (!do_size) {
 787     if (size != 0) st->print("\n\t");
 788     if (reg_lo+1 == reg_hi) { // double move?
 789       if (is_load) st->print("%s %s,[ESP + #%d]",
 790                               UseXmmLoadAndClearUpper ? "MOVSD " : "MOVLPD",
 791                               Matcher::regName[reg_lo], offset);
 792       else         st->print("MOVSD  [ESP + #%d],%s",
 793                               offset, Matcher::regName[reg_lo]);
 794     } else {
 795       if (is_load) st->print("MOVSS  %s,[ESP + #%d]",
 796                               Matcher::regName[reg_lo], offset);
 797       else         st->print("MOVSS  [ESP + #%d],%s",
 798                               offset, Matcher::regName[reg_lo]);
 799     }
 800 #endif
 801   }
 802   int offset_size = (offset == 0) ? 0 : ((offset <= 127) ? 1 : 4);
 803   // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
 804   return size+5+offset_size;
 805 }
 806 
 807 
 808 static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 809                             int src_hi, int dst_hi, int size, outputStream* st ) {
 810   if (cbuf) {
 811     MacroAssembler _masm(cbuf);
 812     if (src_lo+1 == src_hi && dst_lo+1 == dst_hi) { // double move?
 813       __ movdbl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 814                 as_XMMRegister(Matcher::_regEncode[src_lo]));
 815     } else {
 816       __ movflt(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 817                 as_XMMRegister(Matcher::_regEncode[src_lo]));
 818     }
 819 #ifndef PRODUCT
 820   } else if (!do_size) {
 821     if (size != 0) st->print("\n\t");
 822     if (UseXmmRegToRegMoveAll) { // Use movaps/movapd to move between xmm registers
 823       if (src_lo+1 == src_hi && dst_lo+1 == dst_hi) { // double move?
 824         st->print("MOVAPD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 825       } else {
 826         st->print("MOVAPS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 827       }
 828     } else {
 829       if( src_lo+1 == src_hi && dst_lo+1 == dst_hi ) { // double move?
 830         st->print("MOVSD  %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 831       } else {
 832         st->print("MOVSS  %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 833       }
 834     }
 835 #endif
 836   }
 837   // VEX_2bytes prefix is used if UseAVX > 0, and it takes the same 2 bytes as SIMD prefix.
 838   // Only MOVAPS SSE prefix uses 1 byte.
 839   int sz = 4;
 840   if (!(src_lo+1 == src_hi && dst_lo+1 == dst_hi) &&
 841       UseXmmRegToRegMoveAll && (UseAVX == 0)) sz = 3;
 842   return size + sz;
 843 }
 844 
 845 static int impl_movgpr2x_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 846                             int src_hi, int dst_hi, int size, outputStream* st ) {
 847   // 32-bit
 848   if (cbuf) {
 849     MacroAssembler _masm(cbuf);
 850     __ movdl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 851              as_Register(Matcher::_regEncode[src_lo]));
 852 #ifndef PRODUCT
 853   } else if (!do_size) {
 854     st->print("movdl   %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
 855 #endif
 856   }
 857   return 4;
 858 }
 859 
 860 
 861 static int impl_movx2gpr_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 862                                  int src_hi, int dst_hi, int size, outputStream* st ) {
 863   // 32-bit
 864   if (cbuf) {
 865     MacroAssembler _masm(cbuf);
 866     __ movdl(as_Register(Matcher::_regEncode[dst_lo]),
 867              as_XMMRegister(Matcher::_regEncode[src_lo]));
 868 #ifndef PRODUCT
 869   } else if (!do_size) {
 870     st->print("movdl   %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
 871 #endif
 872   }
 873   return 4;
 874 }
 875 
 876 static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size, outputStream* st ) {
 877   if( cbuf ) {
 878     emit_opcode(*cbuf, 0x8B );
 879     emit_rm    (*cbuf, 0x3, Matcher::_regEncode[dst], Matcher::_regEncode[src] );
 880 #ifndef PRODUCT
 881   } else if( !do_size ) {
 882     if( size != 0 ) st->print("\n\t");
 883     st->print("MOV    %s,%s",Matcher::regName[dst],Matcher::regName[src]);
 884 #endif
 885   }
 886   return size+2;
 887 }
 888 
 889 static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi,
 890                                  int offset, int size, outputStream* st ) {
 891   if( src_lo != FPR1L_num ) {      // Move value to top of FP stack, if not already there
 892     if( cbuf ) {
 893       emit_opcode( *cbuf, 0xD9 );  // FLD (i.e., push it)


 924                             int stack_offset, int reg, uint ireg, outputStream* st);
 925 
 926 static int vec_stack_to_stack_helper(CodeBuffer *cbuf, bool do_size, int src_offset,
 927                                      int dst_offset, uint ireg, outputStream* st) {
 928   int calc_size = 0;
 929   int src_offset_size = (src_offset == 0) ? 0 : ((src_offset < 0x80) ? 1 : 4);
 930   int dst_offset_size = (dst_offset == 0) ? 0 : ((dst_offset < 0x80) ? 1 : 4);
 931   switch (ireg) {
 932   case Op_VecS:
 933     calc_size = 3+src_offset_size + 3+dst_offset_size;
 934     break;
 935   case Op_VecD:
 936     calc_size = 3+src_offset_size + 3+dst_offset_size;
 937     src_offset += 4;
 938     dst_offset += 4;
 939     src_offset_size = (src_offset == 0) ? 0 : ((src_offset < 0x80) ? 1 : 4);
 940     dst_offset_size = (dst_offset == 0) ? 0 : ((dst_offset < 0x80) ? 1 : 4);
 941     calc_size += 3+src_offset_size + 3+dst_offset_size;
 942     break;
 943   case Op_VecX:
 944     calc_size = 6 + 6 + 5+src_offset_size + 5+dst_offset_size;
 945     break;
 946   case Op_VecY:
 947     calc_size = 6 + 6 + 5+src_offset_size + 5+dst_offset_size;
 948     break;
 949   default:
 950     ShouldNotReachHere();
 951   }
 952   if (cbuf) {
 953     MacroAssembler _masm(cbuf);
 954     int offset = __ offset();
 955     switch (ireg) {
 956     case Op_VecS:
 957       __ pushl(Address(rsp, src_offset));
 958       __ popl (Address(rsp, dst_offset));
 959       break;
 960     case Op_VecD:
 961       __ pushl(Address(rsp, src_offset));
 962       __ popl (Address(rsp, dst_offset));
 963       __ pushl(Address(rsp, src_offset+4));
 964       __ popl (Address(rsp, dst_offset+4));
 965       break;
 966     case Op_VecX:
 967       __ movdqu(Address(rsp, -16), xmm0);
 968       __ movdqu(xmm0, Address(rsp, src_offset));
 969       __ movdqu(Address(rsp, dst_offset), xmm0);
 970       __ movdqu(xmm0, Address(rsp, -16));
 971       break;
 972     case Op_VecY:
 973       __ vmovdqu(Address(rsp, -32), xmm0);
 974       __ vmovdqu(xmm0, Address(rsp, src_offset));
 975       __ vmovdqu(Address(rsp, dst_offset), xmm0);
 976       __ vmovdqu(xmm0, Address(rsp, -32));
 977       break;
 978     default:
 979       ShouldNotReachHere();
 980     }
 981     int size = __ offset() - offset;
 982     assert(size == calc_size, "incorrect size calculation");
 983     return size;
 984 #ifndef PRODUCT
 985   } else if (!do_size) {
 986     switch (ireg) {
 987     case Op_VecS:
 988       st->print("pushl   [rsp + #%d]\t# 32-bit mem-mem spill\n\t"
 989                 "popl    [rsp + #%d]",
 990                 src_offset, dst_offset);
 991       break;
 992     case Op_VecD:
 993       st->print("pushl   [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
 994                 "popl    [rsp + #%d]\n\t"
 995                 "pushl   [rsp + #%d]\n\t"
 996                 "popl    [rsp + #%d]",
 997                 src_offset, dst_offset, src_offset+4, dst_offset+4);
 998       break;
 999      case Op_VecX:
1000       st->print("movdqu  [rsp - #16], xmm0\t# 128-bit mem-mem spill\n\t"
1001                 "movdqu  xmm0, [rsp + #%d]\n\t"
1002                 "movdqu  [rsp + #%d], xmm0\n\t"
1003                 "movdqu  xmm0, [rsp - #16]",
1004                 src_offset, dst_offset);
1005       break;
1006     case Op_VecY:
1007       st->print("vmovdqu [rsp - #32], xmm0\t# 256-bit mem-mem spill\n\t"
1008                 "vmovdqu xmm0, [rsp + #%d]\n\t"
1009                 "vmovdqu [rsp + #%d], xmm0\n\t"
1010                 "vmovdqu xmm0, [rsp - #32]",
1011                 src_offset, dst_offset);
1012       break;
1013     default:
1014       ShouldNotReachHere();
1015     }
1016 #endif
1017   }
1018   return calc_size;
1019 }
1020 
1021 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
1022   // Get registers to move
1023   OptoReg::Name src_second = ra_->get_reg_second(in(1));
1024   OptoReg::Name src_first = ra_->get_reg_first(in(1));
1025   OptoReg::Name dst_second = ra_->get_reg_second(this );
1026   OptoReg::Name dst_first = ra_->get_reg_first(this );
1027 
1028   enum RC src_second_rc = rc_class(src_second);
1029   enum RC src_first_rc = rc_class(src_first);
1030   enum RC dst_second_rc = rc_class(dst_second);
1031   enum RC dst_first_rc = rc_class(dst_first);
1032 
1033   assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
1034 
1035   // Generate spill code!
1036   int size = 0;
1037 
1038   if( src_first == dst_first && src_second == dst_second )
1039     return size;            // Self copy, no move
1040 
1041   if (bottom_type()->isa_vect() != NULL) {
1042     uint ireg = ideal_reg();
1043     assert((src_first_rc != rc_int && dst_first_rc != rc_int), "sanity");
1044     assert((src_first_rc != rc_float && dst_first_rc != rc_float), "sanity");
1045     assert((ireg == Op_VecS || ireg == Op_VecD || ireg == Op_VecX || ireg == Op_VecY), "sanity");
1046     if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1047       // mem -> mem
1048       int src_offset = ra_->reg2offset(src_first);
1049       int dst_offset = ra_->reg2offset(dst_first);
1050       return vec_stack_to_stack_helper(cbuf, do_size, src_offset, dst_offset, ireg, st);
1051     } else if (src_first_rc == rc_xmm && dst_first_rc == rc_xmm ) {
1052       return vec_mov_helper(cbuf, do_size, src_first, dst_first, src_second, dst_second, ireg, st);
1053     } else if (src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
1054       int stack_offset = ra_->reg2offset(dst_first);
1055       return vec_spill_helper(cbuf, do_size, false, stack_offset, src_first, ireg, st);
1056     } else if (src_first_rc == rc_stack && dst_first_rc == rc_xmm ) {
1057       int stack_offset = ra_->reg2offset(src_first);
1058       return vec_spill_helper(cbuf, do_size, true,  stack_offset, dst_first, ireg, st);
1059     } else {
1060       ShouldNotReachHere();
1061     }
1062   }
1063 
1064   // --------------------------------------
1065   // Check for mem-mem move.  push/pop to move.


3981   predicate( UseSSE < 2 );
3982   constraint(ALLOC_IN_RC(fp_flt_reg));
3983   match(RegF);
3984   match(regFPR1);
3985   format %{ %}
3986   interface(REG_INTER);
3987 %}
3988 
3989 // Float register operands
3990 operand regFPR1(regFPR reg) %{
3991   predicate( UseSSE < 2 );
3992   constraint(ALLOC_IN_RC(fp_flt_reg0));
3993   match(reg);
3994   format %{ "FPR1" %}
3995   interface(REG_INTER);
3996 %}
3997 
3998 // XMM Float register operands
3999 operand regF() %{
4000   predicate( UseSSE>=1 );
4001   constraint(ALLOC_IN_RC(float_reg));
4002   match(RegF);
4003   format %{ %}
4004   interface(REG_INTER);
4005 %}
4006 
4007 // XMM Double register operands
4008 operand regD() %{
4009   predicate( UseSSE>=2 );
4010   constraint(ALLOC_IN_RC(double_reg));
4011   match(RegD);
4012   format %{ %}
4013   interface(REG_INTER);
4014 %}
4015 
4016 
4017 //----------Memory Operands----------------------------------------------------
4018 // Direct Memory Operand
4019 operand direct(immP addr) %{
4020   match(addr);
4021 
4022   format %{ "[$addr]" %}
4023   interface(MEMORY_INTER) %{
4024     base(0xFFFFFFFF);
4025     index(0x4);
4026     scale(0x0);
4027     disp($addr);
4028   %}
4029 %}
4030 
4031 // Indirect Memory Operand
4032 operand indirect(eRegP reg) %{
4033   constraint(ALLOC_IN_RC(int_reg));
4034   match(reg);
4035 


11151   match(Set dst (ConvL2F src));
11152   effect( KILL cr );
11153   format %{ "PUSH   $src.hi\t# Convert long to single float\n\t"
11154             "PUSH   $src.lo\n\t"
11155             "FILD   ST,[ESP + #0]\n\t"
11156             "ADD    ESP,8\n\t"
11157             "FSTP_S $dst\t# F-round" %}
11158   opcode(0xDF, 0x5);  /* DF /5 */
11159   ins_encode(convert_long_double(src), Pop_Mem_FPR(dst));
11160   ins_pipe( pipe_slow );
11161 %}
11162 
11163 instruct convL2I_reg( rRegI dst, eRegL src ) %{
11164   match(Set dst (ConvL2I src));
11165   effect( DEF dst, USE src );
11166   format %{ "MOV    $dst,$src.lo" %}
11167   ins_encode(enc_CopyL_Lo(dst,src));
11168   ins_pipe( ialu_reg_reg );
11169 %}
11170 
11171 
11172 instruct MoveF2I_stack_reg(rRegI dst, stackSlotF src) %{
11173   match(Set dst (MoveF2I src));
11174   effect( DEF dst, USE src );
11175   ins_cost(100);
11176   format %{ "MOV    $dst,$src\t# MoveF2I_stack_reg" %}
11177   ins_encode %{
11178     __ movl($dst$$Register, Address(rsp, $src$$disp));
11179   %}
11180   ins_pipe( ialu_reg_mem );
11181 %}
11182 
11183 instruct MoveFPR2I_reg_stack(stackSlotI dst, regFPR src) %{
11184   predicate(UseSSE==0);
11185   match(Set dst (MoveF2I src));
11186   effect( DEF dst, USE src );
11187 
11188   ins_cost(125);
11189   format %{ "FST_S  $dst,$src\t# MoveF2I_reg_stack" %}
11190   ins_encode( Pop_Mem_Reg_FPR(dst, src) );
11191   ins_pipe( fpu_mem_reg );


----- New version (rev 8344) -----

  84 // the stack will not have this element so FPR1 == st(0) from the
  85 // oopMap viewpoint. This same weirdness with numbering causes
  86 // instruction encoding to have to play games with the register
  87 // encode to correct for this 0/1 issue. See MachSpillCopyNode::implementation
  88 // where it does flt->flt moves to see an example
  89 //
  90 reg_def FPR1L( SOC, SOC, Op_RegF, 1, as_FloatRegister(0)->as_VMReg());
  91 reg_def FPR1H( SOC, SOC, Op_RegF, 1, as_FloatRegister(0)->as_VMReg()->next());
  92 reg_def FPR2L( SOC, SOC, Op_RegF, 2, as_FloatRegister(1)->as_VMReg());
  93 reg_def FPR2H( SOC, SOC, Op_RegF, 2, as_FloatRegister(1)->as_VMReg()->next());
  94 reg_def FPR3L( SOC, SOC, Op_RegF, 3, as_FloatRegister(2)->as_VMReg());
  95 reg_def FPR3H( SOC, SOC, Op_RegF, 3, as_FloatRegister(2)->as_VMReg()->next());
  96 reg_def FPR4L( SOC, SOC, Op_RegF, 4, as_FloatRegister(3)->as_VMReg());
  97 reg_def FPR4H( SOC, SOC, Op_RegF, 4, as_FloatRegister(3)->as_VMReg()->next());
  98 reg_def FPR5L( SOC, SOC, Op_RegF, 5, as_FloatRegister(4)->as_VMReg());
  99 reg_def FPR5H( SOC, SOC, Op_RegF, 5, as_FloatRegister(4)->as_VMReg()->next());
 100 reg_def FPR6L( SOC, SOC, Op_RegF, 6, as_FloatRegister(5)->as_VMReg());
 101 reg_def FPR6H( SOC, SOC, Op_RegF, 6, as_FloatRegister(5)->as_VMReg()->next());
 102 reg_def FPR7L( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg());
 103 reg_def FPR7H( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next());
 104 //
 105 // Empty fill registers, which are never used, but supply alignment to xmm regs
 106 //
 107 reg_def FILL0( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next(2));
 108 reg_def FILL1( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next(3));
 109 reg_def FILL2( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next(4));
 110 reg_def FILL3( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next(5));
 111 reg_def FILL4( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next(6));
 112 reg_def FILL5( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next(7));
 113 reg_def FILL6( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next(8));
 114 reg_def FILL7( SOC, SOC, Op_RegF, 7, as_FloatRegister(6)->as_VMReg()->next(9));
 115 
 116 // Specify priority of register selection within phases of register
 117 // allocation.  Highest priority is first.  A useful heuristic is to
 118 // give registers a low priority when they are required by machine
 119 // instructions, like EAX and EDX.  Registers which are used as
 120 // pairs must fall on an even boundary (witness the FPR#L's in this list).
 121 // For the Intel integer registers, the equivalent Long pairs are
 122 // EDX:EAX, EBX:ECX, and EDI:EBP.
 123 alloc_class chunk0( ECX,   EBX,   EBP,   EDI,   EAX,   EDX,   ESI, ESP,
 124                     FPR0L, FPR0H, FPR1L, FPR1H, FPR2L, FPR2H,
 125                     FPR3L, FPR3H, FPR4L, FPR4H, FPR5L, FPR5H,
 126                     FPR6L, FPR6H, FPR7L, FPR7H,
 127                     FILL0, FILL1, FILL2, FILL3, FILL4, FILL5, FILL6, FILL7);
 128 
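The FILL registers exist purely as padding: ECX through ESP contribute 8 slots
and FPR0L through FPR7H another 16, for 24 in total, so FILL0 through FILL7
round chunk0 up to 32 slots and keep the XMM registers that follow aligned, as
the "supply alignment to xmm regs" comment above indicates.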
 129 
 130 //----------Architecture Description Register Classes--------------------------
 131 // Several register classes are automatically defined based upon information in
 132 // this architecture description.
 133 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 134 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 135 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 136 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 137 //
 138 // Class for no registers (empty set).
 139 reg_class no_reg();
 140 
 141 // Class for all registers (including EBP)
 142 reg_class any_reg_with_ebp(EAX, EDX, EBP, EDI, ESI, ECX, EBX, ESP);
 143 // Class for all registers (excluding EBP)
 144 reg_class any_reg_no_ebp(EAX, EDX, EDI, ESI, ECX, EBX, ESP);
 145 // Dynamic register class that selects at runtime between register classes
 146 // any_reg and any_no_ebp_reg (depending on the value of the flag PreserveFramePointer).
 147 // Equivalent to: return PreserveFramePointer ? any_no_ebp_reg : any_reg;


 274   return operand;
 275 }
 276 
 277 // Buffer for 128-bit masks used by SSE instructions.
 278 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
 279 
 280 // Static initialization during VM startup.
 281 static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
 282 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
 283 static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
 284 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
 285 
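The pool over-allocates by one 128-bit slot so that double_quadword (whose
closing lines appear at the top of this hunk) can round each address down to a
16-byte boundary, as SSE requires for aligned 128-bit memory operands. A
minimal sketch of that helper, reconstructed from its call sites here and
illustrative rather than verbatim HotSpot source:

  static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
    // Round down to a 16-byte boundary so the result can serve as an
    // aligned 128-bit memory operand for SSE instructions.
    jlong *operand = (jlong*)(((intptr_t)adr) & ~(intptr_t)0xF);
    operand[0] = lo;  // low  64 bits of the mask
    operand[1] = hi;  // high 64 bits of the mask
    return operand;
  }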
 286 // Offset hacking within calls.
 287 static int pre_call_resets_size() {
 288   int size = 0;
 289   Compile* C = Compile::current();
 290   if (C->in_24_bit_fp_mode()) {
 291     size += 6; // fldcw
 292   }
 293   if (C->max_vector_size() > 16) {
 294     if (UseAVX <= 2) {
 295       size += 3; // vzeroupper
 296     }
 297   }
 298   return size;
 299 }
 300 
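Worked through for a static Java call: CALL rel32 is 5 bytes, so with the
24-bit FP mode reset (6-byte FLDCW) and a 3-byte vzeroupper the return address
lands 5 + 6 + 3 = 14 bytes past the start of the sequence; when UseAVX > 2
this revision skips the vzeroupper, giving 5 + 6 = 11.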
 301 // !!!!! Special hack to get all types of calls to specify the byte offset
 302 //       from the start of the call to the point where the return address
 303 //       will point.
 304 int MachCallStaticJavaNode::ret_addr_offset() {
 305   return 5 + pre_call_resets_size();  // 5 bytes from start of call to where return address points
 306 }
 307 
 308 int MachCallDynamicJavaNode::ret_addr_offset() {
 309   return 10 + pre_call_resets_size();  // 10 bytes from start of call to where return address points
 310 }
 311 
 312 static int sizeof_FFree_Float_Stack_All = -1;
 313 
 314 int MachCallRuntimeNode::ret_addr_offset() {
 315   assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
 316   return sizeof_FFree_Float_Stack_All + 5 + pre_call_resets_size();
 317 }


 764     emit_opcode  (*cbuf, opcode );
 765     encode_RegMem(*cbuf, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, relocInfo::none);
 766 #ifndef PRODUCT
 767   } else if( !do_size ) {
 768     if( size != 0 ) st->print("\n\t");
 769     if( opcode == 0x8B || opcode == 0x89 ) { // MOV
 770       if( is_load ) st->print("%s   %s,[ESP + #%d]",op_str,Matcher::regName[reg],offset);
 771       else          st->print("%s   [ESP + #%d],%s",op_str,offset,Matcher::regName[reg]);
 772     } else { // FLD, FST, PUSH, POP
 773       st->print("%s [ESP + #%d]",op_str,offset);
 774     }
 775 #endif
 776   }
 777   int offset_size = (offset == 0) ? 0 : ((offset <= 127) ? 1 : 4);
 778   return size+3+offset_size;
 779 }
 780 
 781 // Helper for XMM registers.  Extra opcode bits, limited syntax.
 782 static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
 783                          int offset, int reg_lo, int reg_hi, int size, outputStream* st ) {
 784   int in_size_in_bits = Assembler::EVEX_32bit;
 785   int evex_encoding = 0;
 786   if (reg_lo+1 == reg_hi) {
 787     in_size_in_bits = Assembler::EVEX_64bit;
 788     evex_encoding = Assembler::VEX_W;
 789   }
 790   if (cbuf) {
 791     MacroAssembler _masm(cbuf);
 792     if (reg_lo+1 == reg_hi) { // double move?
 793       if (is_load) {
 794         __ movdbl(as_XMMRegister(Matcher::_regEncode[reg_lo]), Address(rsp, offset));
 795       } else {
 796         __ movdbl(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[reg_lo]));
 797       }
 798     } else {
 799       if (is_load) {
 800         __ movflt(as_XMMRegister(Matcher::_regEncode[reg_lo]), Address(rsp, offset));
 801       } else {
 802         __ movflt(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[reg_lo]));
 803       }
 804     }
 805 #ifndef PRODUCT
 806   } else if (!do_size) {
 807     if (size != 0) st->print("\n\t");
 808     if (reg_lo+1 == reg_hi) { // double move?
 809       if (is_load) st->print("%s %s,[ESP + #%d]",
 810                               UseXmmLoadAndClearUpper ? "MOVSD " : "MOVLPD",
 811                               Matcher::regName[reg_lo], offset);
 812       else         st->print("MOVSD  [ESP + #%d],%s",
 813                               offset, Matcher::regName[reg_lo]);
 814     } else {
 815       if (is_load) st->print("MOVSS  %s,[ESP + #%d]",
 816                               Matcher::regName[reg_lo], offset);
 817       else         st->print("MOVSS  [ESP + #%d],%s",
 818                               offset, Matcher::regName[reg_lo]);
 819     }
 820 #endif
 821   }
 822   bool is_single_byte = false;
 823   if ((UseAVX > 2) && (offset != 0)) {
 824     is_single_byte = Assembler::query_compressed_disp_byte(offset, true, 0, Assembler::EVEX_T1S, in_size_in_bits, evex_encoding);
 825   }
 826   int offset_size = 0;
 827   if (UseAVX > 2 ) {
 828     offset_size = (offset == 0) ? 0 : ((is_single_byte) ? 1 : 4);
 829   } else {
 830     offset_size = (offset == 0) ? 0 : ((offset <= 127) ? 1 : 4);
 831   }
 832   size += (UseAVX > 2) ? 2 : 0; // Need an additional two bytes for EVEX
 833   // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
 834   return size+5+offset_size;
 835 }
 836 
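The new sizing logic models EVEX's compressed-displacement rule: an 8-bit
displacement is implicitly scaled by the operand width N, so offsets that are
exact multiples of N still fit the one-byte form even beyond 127.
Assembler::query_compressed_disp_byte makes the real decision above; the
helper below is a simplified, hypothetical restatement for the T1S tuple:

  // Illustrative only: EVEX disp8*N compression for a scalar access, where
  // N is the operand width in bytes (4 for movflt, 8 for movdbl here).
  static bool fits_evex_disp8(int offset, int N) {
    if (offset % N != 0) return false;       // must be a multiple of N
    int scaled = offset / N;
    return -128 <= scaled && scaled <= 127;  // scaled value fits a signed byte
  }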
 837 
 838 static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 839                             int src_hi, int dst_hi, int size, outputStream* st ) {
 840   if (cbuf) {
 841     MacroAssembler _masm(cbuf);
 842     if (src_lo+1 == src_hi && dst_lo+1 == dst_hi) { // double move?
 843       __ movdbl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 844                 as_XMMRegister(Matcher::_regEncode[src_lo]));
 845     } else {
 846       __ movflt(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 847                 as_XMMRegister(Matcher::_regEncode[src_lo]));
 848     }
 849 #ifndef PRODUCT
 850   } else if (!do_size) {
 851     if (size != 0) st->print("\n\t");
 852     if (UseXmmRegToRegMoveAll) { // Use movaps/movapd to move between xmm registers
 853       if (src_lo+1 == src_hi && dst_lo+1 == dst_hi) { // double move?
 854         st->print("MOVAPD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 855       } else {
 856         st->print("MOVAPS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 857       }
 858     } else {
 859       if( src_lo+1 == src_hi && dst_lo+1 == dst_hi ) { // double move?
 860         st->print("MOVSD  %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 861       } else {
 862         st->print("MOVSS  %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
 863       }
 864     }
 865 #endif
 866   }
 867   // VEX_2bytes prefix is used if UseAVX > 0, and it takes the same 2 bytes as SIMD prefix.
 868   // Only MOVAPS SSE prefix uses 1 byte.  EVEX uses an additional 2 bytes.
 869   int sz = (UseAVX > 2) ? 6 : 4;
 870   if (!(src_lo+1 == src_hi && dst_lo+1 == dst_hi) &&
 871       UseXmmRegToRegMoveAll && (UseAVX == 0)) sz = 3;
 872   return size + sz;
 873 }
 874 
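The byte counts follow from the encodings: SSE MOVAPS is 0F 28 plus ModRM
(3 bytes), MOVSS/MOVSD add a mandatory F3/F2 prefix (4 bytes), the 2-byte VEX
prefix keeps AVX forms at 4 bytes, and the 4-byte EVEX prefix plus opcode and
ModRM comes to 6 bytes, hence sz is 6 when UseAVX > 2 and only the SSE MOVAPS
case drops to 3.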
 875 static int impl_movgpr2x_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 876                             int src_hi, int dst_hi, int size, outputStream* st ) {
 877   // 32-bit
 878   if (cbuf) {
 879     MacroAssembler _masm(cbuf);
 880     __ movdl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
 881              as_Register(Matcher::_regEncode[src_lo]));
 882 #ifndef PRODUCT
 883   } else if (!do_size) {
 884     st->print("movdl   %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
 885 #endif
 886   }
 887   return (UseAVX > 2) ? 6 : 4;
 888 }
 889 
 890 
 891 static int impl_movx2gpr_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 892                                  int src_hi, int dst_hi, int size, outputStream* st ) {
 893   // 32-bit
 894   if (cbuf) {
 895     MacroAssembler _masm(cbuf);
 896     __ movdl(as_Register(Matcher::_regEncode[dst_lo]),
 897              as_XMMRegister(Matcher::_regEncode[src_lo]));
 898 #ifndef PRODUCT
 899   } else if (!do_size) {
 900     st->print("movdl   %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
 901 #endif
 902   }
 903   return (UseAVX > 2) ? 6 : 4;
 904 }
 905 
 906 static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size, outputStream* st ) {
 907   if( cbuf ) {
 908     emit_opcode(*cbuf, 0x8B );
 909     emit_rm    (*cbuf, 0x3, Matcher::_regEncode[dst], Matcher::_regEncode[src] );
 910 #ifndef PRODUCT
 911   } else if( !do_size ) {
 912     if( size != 0 ) st->print("\n\t");
 913     st->print("MOV    %s,%s",Matcher::regName[dst],Matcher::regName[src]);
 914 #endif
 915   }
 916   return size+2;
 917 }
 918 
 919 static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi,
 920                                  int offset, int size, outputStream* st ) {
 921   if( src_lo != FPR1L_num ) {      // Move value to top of FP stack, if not already there
 922     if( cbuf ) {
 923       emit_opcode( *cbuf, 0xD9 );  // FLD (i.e., push it)


 954                             int stack_offset, int reg, uint ireg, outputStream* st);
 955 
 956 static int vec_stack_to_stack_helper(CodeBuffer *cbuf, bool do_size, int src_offset,
 957                                      int dst_offset, uint ireg, outputStream* st) {
 958   int calc_size = 0;
 959   int src_offset_size = (src_offset == 0) ? 0 : ((src_offset < 0x80) ? 1 : 4);
 960   int dst_offset_size = (dst_offset == 0) ? 0 : ((dst_offset < 0x80) ? 1 : 4);
 961   switch (ireg) {
 962   case Op_VecS:
 963     calc_size = 3+src_offset_size + 3+dst_offset_size;
 964     break;
 965   case Op_VecD:
 966     calc_size = 3+src_offset_size + 3+dst_offset_size;
 967     src_offset += 4;
 968     dst_offset += 4;
 969     src_offset_size = (src_offset == 0) ? 0 : ((src_offset < 0x80) ? 1 : 4);
 970     dst_offset_size = (dst_offset == 0) ? 0 : ((dst_offset < 0x80) ? 1 : 4);
 971     calc_size += 3+src_offset_size + 3+dst_offset_size;
 972     break;
 973   case Op_VecX:
 974   case Op_VecY:
 975   case Op_VecZ:
 976     calc_size = 6 + 6 + 5+src_offset_size + 5+dst_offset_size;
 977     break;
 978   default:
 979     ShouldNotReachHere();
 980   }
 981   if (cbuf) {
 982     MacroAssembler _masm(cbuf);
 983     int offset = __ offset();
 984     switch (ireg) {
 985     case Op_VecS:
 986       __ pushl(Address(rsp, src_offset));
 987       __ popl (Address(rsp, dst_offset));
 988       break;
 989     case Op_VecD:
 990       __ pushl(Address(rsp, src_offset));
 991       __ popl (Address(rsp, dst_offset));
 992       __ pushl(Address(rsp, src_offset+4));
 993       __ popl (Address(rsp, dst_offset+4));
 994       break;
 995     case Op_VecX:
 996       __ movdqu(Address(rsp, -16), xmm0);
 997       __ movdqu(xmm0, Address(rsp, src_offset));
 998       __ movdqu(Address(rsp, dst_offset), xmm0);
 999       __ movdqu(xmm0, Address(rsp, -16));
1000       break;
1001     case Op_VecY:
1002       __ vmovdqu(Address(rsp, -32), xmm0);
1003       __ vmovdqu(xmm0, Address(rsp, src_offset));
1004       __ vmovdqu(Address(rsp, dst_offset), xmm0);
1005       __ vmovdqu(xmm0, Address(rsp, -32));
           break;
1006     case Op_VecZ:
1007       __ evmovdqu(Address(rsp, -64), xmm0, 2);
1008       __ evmovdqu(xmm0, Address(rsp, src_offset), 2);
1009       __ evmovdqu(Address(rsp, dst_offset), xmm0, 2);
1010       __ evmovdqu(xmm0, Address(rsp, -64), 2);
1011       break;
1012     default:
1013       ShouldNotReachHere();
1014     }
1015     int size = __ offset() - offset;
1016     assert(size == calc_size, "incorrect size calculation");
1017     return size;
1018 #ifndef PRODUCT
1019   } else if (!do_size) {
1020     switch (ireg) {
1021     case Op_VecS:
1022       st->print("pushl   [rsp + #%d]\t# 32-bit mem-mem spill\n\t"
1023                 "popl    [rsp + #%d]",
1024                 src_offset, dst_offset);
1025       break;
1026     case Op_VecD:
1027       st->print("pushl   [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
1028                 "popl    [rsp + #%d]\n\t"
1029                 "pushl   [rsp + #%d]\n\t"
1030                 "popl    [rsp + #%d]",
1031                 src_offset, dst_offset, src_offset+4, dst_offset+4);
1032       break;
1033      case Op_VecX:
1034       st->print("movdqu  [rsp - #16], xmm0\t# 128-bit mem-mem spill\n\t"
1035                 "movdqu  xmm0, [rsp + #%d]\n\t"
1036                 "movdqu  [rsp + #%d], xmm0\n\t"
1037                 "movdqu  xmm0, [rsp - #16]",
1038                 src_offset, dst_offset);
1039       break;
1040     case Op_VecY:
1041       st->print("vmovdqu [rsp - #32], xmm0\t# 256-bit mem-mem spill\n\t"
1042                 "vmovdqu xmm0, [rsp + #%d]\n\t"
1043                 "vmovdqu [rsp + #%d], xmm0\n\t"
1044                 "vmovdqu xmm0, [rsp - #32]",
1045                 src_offset, dst_offset);
           break;
1046     case Op_VecZ:
1047       st->print("evmovdqu [rsp - #64], xmm0\t# 512-bit mem-mem spill\n\t"
1048                 "evmovdqu xmm0, [rsp + #%d]\n\t"
1049                 "evmovdqu [rsp + #%d], xmm0\n\t"
1050                 "evmovdqu xmm0, [rsp - #64]",
1051                 src_offset, dst_offset);
1052       break;
1053     default:
1054       ShouldNotReachHere();
1055     }
1056 #endif
1057   }
1058   return calc_size;
1059 }
1060 
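A note on the technique above: x86 has no memory-to-memory vector move, so the
wider spills park xmm0 in scratch stack below ESP, bounce the data through it,
and restore it afterwards. This is safe here because the four moves are
emitted back to back with no intervening call or safepoint, so compiled code
never observes the scratch slots or the temporarily clobbered register.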
1061 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
1062   // Get registers to move
1063   OptoReg::Name src_second = ra_->get_reg_second(in(1));
1064   OptoReg::Name src_first = ra_->get_reg_first(in(1));
1065   OptoReg::Name dst_second = ra_->get_reg_second(this );
1066   OptoReg::Name dst_first = ra_->get_reg_first(this );
1067 
1068   enum RC src_second_rc = rc_class(src_second);
1069   enum RC src_first_rc = rc_class(src_first);
1070   enum RC dst_second_rc = rc_class(dst_second);
1071   enum RC dst_first_rc = rc_class(dst_first);
1072 
1073   assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
1074 
1075   // Generate spill code!
1076   int size = 0;
1077 
1078   if( src_first == dst_first && src_second == dst_second )
1079     return size;            // Self copy, no move
1080 
1081   if (bottom_type()->isa_vect() != NULL) {
1082     uint ireg = ideal_reg();
1083     assert((src_first_rc != rc_int && dst_first_rc != rc_int), "sanity");
1084     assert((src_first_rc != rc_float && dst_first_rc != rc_float), "sanity");
1085     assert((ireg == Op_VecS || ireg == Op_VecD || ireg == Op_VecX || ireg == Op_VecY || ireg == Op_VecZ ), "sanity");
1086     if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1087       // mem -> mem
1088       int src_offset = ra_->reg2offset(src_first);
1089       int dst_offset = ra_->reg2offset(dst_first);
1090       return vec_stack_to_stack_helper(cbuf, do_size, src_offset, dst_offset, ireg, st);
1091     } else if (src_first_rc == rc_xmm && dst_first_rc == rc_xmm ) {
1092       return vec_mov_helper(cbuf, do_size, src_first, dst_first, src_second, dst_second, ireg, st);
1093     } else if (src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
1094       int stack_offset = ra_->reg2offset(dst_first);
1095       return vec_spill_helper(cbuf, do_size, false, stack_offset, src_first, ireg, st);
1096     } else if (src_first_rc == rc_stack && dst_first_rc == rc_xmm ) {
1097       int stack_offset = ra_->reg2offset(src_first);
1098       return vec_spill_helper(cbuf, do_size, true,  stack_offset, dst_first, ireg, st);
1099     } else {
1100       ShouldNotReachHere();
1101     }
1102   }
1103 
1104   // --------------------------------------
1105   // Check for mem-mem move.  push/pop to move.


4021   predicate( UseSSE < 2 );
4022   constraint(ALLOC_IN_RC(fp_flt_reg));
4023   match(RegF);
4024   match(regFPR1);
4025   format %{ %}
4026   interface(REG_INTER);
4027 %}
4028 
4029 // Float register operands
4030 operand regFPR1(regFPR reg) %{
4031   predicate( UseSSE < 2 );
4032   constraint(ALLOC_IN_RC(fp_flt_reg0));
4033   match(reg);
4034   format %{ "FPR1" %}
4035   interface(REG_INTER);
4036 %}
4037 
4038 // XMM Float register operands
4039 operand regF() %{
4040   predicate( UseSSE>=1 );
4041   constraint(ALLOC_IN_RC(float_reg_legacy));
4042   match(RegF);
4043   format %{ %}
4044   interface(REG_INTER);
4045 %}
4046 
4047 // XMM Double register operands
4048 operand regD() %{
4049   predicate( UseSSE>=2 );
4050   constraint(ALLOC_IN_RC(double_reg_legacy));
4051   match(RegD);
4052   format %{ %}
4053   interface(REG_INTER);
4054 %}
4055 
4056 // Vectors: note that we bind these to the legacy register classes to avoid the extra
4057 // runtime code generation of reg_class_dynamic, unneeded in the 32-bit VM (see note below).
4058 operand vecS() %{
4059   constraint(ALLOC_IN_RC(vectors_reg_legacy));
4060   match(VecS);
4061 
4062   format %{ %}
4063   interface(REG_INTER);
4064 %}
4065 
4066 operand vecD() %{
4067   constraint(ALLOC_IN_RC(vectord_reg_legacy));
4068   match(VecD);
4069 
4070   format %{ %}
4071   interface(REG_INTER);
4072 %}
4073 
4074 operand vecX() %{
4075   constraint(ALLOC_IN_RC(vectorx_reg_legacy));
4076   match(VecX);
4077 
4078   format %{ %}
4079   interface(REG_INTER);
4080 %}
4081 
4082 operand vecY() %{
4083   constraint(ALLOC_IN_RC(vectory_reg_legacy));
4084   match(VecY);
4085 
4086   format %{ %}
4087   interface(REG_INTER);
4088 %}
4089 
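For comparison, the shared x86.ad selects between EVEX and legacy classes at
runtime with reg_class_dynamic declarations along these lines (a
reconstruction; the exact class and predicate names there may differ):

  reg_class_dynamic vectorx_reg(vectorx_reg_evex, vectorx_reg_legacy, %{ VM_Version::supports_evex() %});

Since 32-bit mode only ever has xmm0-xmm7, the dynamic predicate would always
pick the legacy class anyway, so these operands bind to it directly.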
4090 //----------Memory Operands----------------------------------------------------
4091 // Direct Memory Operand
4092 operand direct(immP addr) %{
4093   match(addr);
4094 
4095   format %{ "[$addr]" %}
4096   interface(MEMORY_INTER) %{
4097     base(0xFFFFFFFF);
4098     index(0x4);
4099     scale(0x0);
4100     disp($addr);
4101   %}
4102 %}
4103 
4104 // Indirect Memory Operand
4105 operand indirect(eRegP reg) %{
4106   constraint(ALLOC_IN_RC(int_reg));
4107   match(reg);
4108 


11224   match(Set dst (ConvL2F src));
11225   effect( KILL cr );
11226   format %{ "PUSH   $src.hi\t# Convert long to single float\n\t"
11227             "PUSH   $src.lo\n\t"
11228             "FILD   ST,[ESP + #0]\n\t"
11229             "ADD    ESP,8\n\t"
11230             "FSTP_S $dst\t# F-round" %}
11231   opcode(0xDF, 0x5);  /* DF /5 */
11232   ins_encode(convert_long_double(src), Pop_Mem_FPR(dst));
11233   ins_pipe( pipe_slow );
11234 %}
11235 
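On the push order above: x86 is little-endian and the stack grows downward, so
pushing $src.hi first and $src.lo second leaves the low word at [ESP] and the
high word at [ESP+4], exactly the memory layout FILD expects for a 64-bit
integer; the ADD ESP,8 then discards the temporary.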
11236 instruct convL2I_reg( rRegI dst, eRegL src ) %{
11237   match(Set dst (ConvL2I src));
11238   effect( DEF dst, USE src );
11239   format %{ "MOV    $dst,$src.lo" %}
11240   ins_encode(enc_CopyL_Lo(dst,src));
11241   ins_pipe( ialu_reg_reg );
11242 %}
11243 
11244 instruct MoveF2I_stack_reg(rRegI dst, stackSlotF src) %{
11245   match(Set dst (MoveF2I src));
11246   effect( DEF dst, USE src );
11247   ins_cost(100);
11248   format %{ "MOV    $dst,$src\t# MoveF2I_stack_reg" %}
11249   ins_encode %{
11250     __ movl($dst$$Register, Address(rsp, $src$$disp));
11251   %}
11252   ins_pipe( ialu_reg_mem );
11253 %}
11254 
11255 instruct MoveFPR2I_reg_stack(stackSlotI dst, regFPR src) %{
11256   predicate(UseSSE==0);
11257   match(Set dst (MoveF2I src));
11258   effect( DEF dst, USE src );
11259 
11260   ins_cost(125);
11261   format %{ "FST_S  $dst,$src\t# MoveF2I_reg_stack" %}
11262   ins_encode( Pop_Mem_Reg_FPR(dst, src) );
11263   ins_pipe( fpu_mem_reg );

