src/cpu/x86/vm/x86_32.ad

rev 10047 : 8147386: assert(size == calc_size) failed: incorrect size calculattion x86_32.ad
Summary: incorrect offset used in spill code for vectors
Reviewed-by:
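
For context on the fix shown in the frames below: in the old code, the Op_VecD branch of the size calculation incremented src_offset and dst_offset in place, so everything that runs afterwards in the helper (the MacroAssembler emission and the debug printing) saw offsets that were already shifted by 4. The emitted pushl/popl pairs then addressed the wrong stack slots, and whenever the shift pushed an offset across the disp8/disp32 boundary at 0x80, the encoded size no longer matched calc_size and the assert fired. The new code does the same arithmetic on temporaries and leaves the incoming offsets untouched. The sketch that follows is a minimal standalone illustration of that difference, not HotSpot code; disp_size, vecd_calc_size_old and vecd_calc_size_fixed are made-up names that mirror the "(offset == 0) ? 0 : ((offset < 0x80) ? 1 : 4)" pattern and the 3 + offset_size pushl/popl terms from the listing.

    // Standalone sketch (not HotSpot code) of the Op_VecD size bookkeeping.
    #include <cstdio>

    // Displacement bytes needed for [rsp + offset]: none, a sign-extended
    // disp8, or a full disp32 -- same rule as the helper in the listing.
    static int disp_size(int offset) {
      return (offset == 0) ? 0 : ((offset < 0x80) ? 1 : 4);
    }

    // Old logic: the offsets are incremented in place while calc_size is
    // built, so the caller's values are already 4 too large afterwards.
    static int vecd_calc_size_old(int& src_offset, int& dst_offset) {
      int calc_size = 3 + disp_size(src_offset) + 3 + disp_size(dst_offset);
      src_offset += 4;                 // caller's offsets change here
      dst_offset += 4;
      calc_size += 3 + disp_size(src_offset) + 3 + disp_size(dst_offset);
      return calc_size;
    }

    // Fixed logic: same arithmetic on temporaries, incoming offsets untouched.
    static int vecd_calc_size_fixed(int src_offset, int dst_offset) {
      int calc_size = 3 + disp_size(src_offset) + 3 + disp_size(dst_offset);
      int tmp_src_offset = src_offset + 4;
      int tmp_dst_offset = dst_offset + 4;
      calc_size += 3 + disp_size(tmp_src_offset) + 3 + disp_size(tmp_dst_offset);
      return calc_size;
    }

    int main() {
      // Hypothetical spill slots: 0x7c + 4 crosses the disp8/disp32 boundary.
      int src = 0x7c, dst = 0x10;
      int calc = vecd_calc_size_old(src, dst);       // 19 bytes
      // The emission path would now use src = 0x80 for the first pushl
      // (disp32) where calc_size assumed 0x7c (disp8): 22 bytes are emitted,
      // the assert(size == calc_size) fires, and the wrong slots are copied.
      int emitted = 3 + disp_size(src)     + 3 + disp_size(dst) +
                    3 + disp_size(src + 4) + 3 + disp_size(dst + 4);
      printf("old:   calc_size=%d emitted=%d (src mutated to 0x%x)\n",
             calc, emitted, (unsigned)src);

      // With temporaries, calc_size and the emitted byte count both come to 19.
      printf("fixed: calc_size=%d emitted=%d\n",
             vecd_calc_size_fixed(0x7c, 0x10),
             3 + disp_size(0x7c) + 3 + disp_size(0x10) +
             3 + disp_size(0x80) + 3 + disp_size(0x14));
      return 0;
    }

Only the Op_VecD branch manipulated the incoming offsets this way; the VecS and VecX/VecY/VecZ branches compute calc_size without modifying src_offset or dst_offset, which is why the diff below touches only the Op_VecD case.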


 951 
 952   return impl_helper(cbuf,do_size,false,offset,st_op,op,op_str,size, st);
 953 }
 954 
 955 // Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
 956 static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 957                           int src_hi, int dst_hi, uint ireg, outputStream* st);
 958 
 959 static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
 960                             int stack_offset, int reg, uint ireg, outputStream* st);
 961 
 962 static int vec_stack_to_stack_helper(CodeBuffer *cbuf, bool do_size, int src_offset,
 963                                      int dst_offset, uint ireg, outputStream* st) {
 964   int calc_size = 0;
 965   int src_offset_size = (src_offset == 0) ? 0 : ((src_offset < 0x80) ? 1 : 4);
 966   int dst_offset_size = (dst_offset == 0) ? 0 : ((dst_offset < 0x80) ? 1 : 4);
 967   switch (ireg) {
 968   case Op_VecS:
 969     calc_size = 3+src_offset_size + 3+dst_offset_size;
 970     break;
 971   case Op_VecD:
 972     calc_size = 3+src_offset_size + 3+dst_offset_size;
 973     src_offset += 4;
 974     dst_offset += 4;
 975     src_offset_size = (src_offset == 0) ? 0 : ((src_offset < 0x80) ? 1 : 4);
 976     dst_offset_size = (dst_offset == 0) ? 0 : ((dst_offset < 0x80) ? 1 : 4);
 977     calc_size += 3+src_offset_size + 3+dst_offset_size;
 978     break;

 979   case Op_VecX:
 980   case Op_VecY:
 981   case Op_VecZ:
 982     calc_size = 6 + 6 + 5+src_offset_size + 5+dst_offset_size;
 983     break;
 984   default:
 985     ShouldNotReachHere();
 986   }
 987   if (cbuf) {
 988     MacroAssembler _masm(cbuf);
 989     int offset = __ offset();
 990     switch (ireg) {
 991     case Op_VecS:
 992       __ pushl(Address(rsp, src_offset));
 993       __ popl (Address(rsp, dst_offset));
 994       break;
 995     case Op_VecD:
 996       __ pushl(Address(rsp, src_offset));
 997       __ popl (Address(rsp, dst_offset));
 998       __ pushl(Address(rsp, src_offset+4));


1003       __ movdqu(xmm0, Address(rsp, src_offset));
1004       __ movdqu(Address(rsp, dst_offset), xmm0);
1005       __ movdqu(xmm0, Address(rsp, -16));
1006       break;
1007     case Op_VecY:
1008       __ vmovdqu(Address(rsp, -32), xmm0);
1009       __ vmovdqu(xmm0, Address(rsp, src_offset));
1010       __ vmovdqu(Address(rsp, dst_offset), xmm0);
1011       __ vmovdqu(xmm0, Address(rsp, -32));
1012       break;
1013     case Op_VecZ:
1014       __ evmovdqul(Address(rsp, -64), xmm0, 2);
1015       __ evmovdqul(xmm0, Address(rsp, src_offset), 2);
1016       __ evmovdqul(Address(rsp, dst_offset), xmm0, 2);
1017       __ evmovdqul(xmm0, Address(rsp, -64), 2);
1018       break;
1019     default:
1020       ShouldNotReachHere();
1021     }
1022     int size = __ offset() - offset;
1023     assert(size == calc_size, "incorrect size calculattion");
1024     return size;
1025 #ifndef PRODUCT
1026   } else if (!do_size) {
1027     switch (ireg) {
1028     case Op_VecS:
1029       st->print("pushl   [rsp + #%d]\t# 32-bit mem-mem spill\n\t"
1030                 "popl    [rsp + #%d]",
1031                 src_offset, dst_offset);
1032       break;
1033     case Op_VecD:
1034       st->print("pushl   [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
1035                 "popq    [rsp + #%d]\n\t"
1036                 "pushl   [rsp + #%d]\n\t"
1037                 "popq    [rsp + #%d]",
1038                 src_offset, dst_offset, src_offset+4, dst_offset+4);
1039       break;
1040      case Op_VecX:
1041       st->print("movdqu  [rsp - #16], xmm0\t# 128-bit mem-mem spill\n\t"
1042                 "movdqu  xmm0, [rsp + #%d]\n\t"
1043                 "movdqu  [rsp + #%d], xmm0\n\t"




 951 
 952   return impl_helper(cbuf,do_size,false,offset,st_op,op,op_str,size, st);
 953 }
 954 
 955 // Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
 956 static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
 957                           int src_hi, int dst_hi, uint ireg, outputStream* st);
 958 
 959 static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
 960                             int stack_offset, int reg, uint ireg, outputStream* st);
 961 
 962 static int vec_stack_to_stack_helper(CodeBuffer *cbuf, bool do_size, int src_offset,
 963                                      int dst_offset, uint ireg, outputStream* st) {
 964   int calc_size = 0;
 965   int src_offset_size = (src_offset == 0) ? 0 : ((src_offset < 0x80) ? 1 : 4);
 966   int dst_offset_size = (dst_offset == 0) ? 0 : ((dst_offset < 0x80) ? 1 : 4);
 967   switch (ireg) {
 968   case Op_VecS:
 969     calc_size = 3+src_offset_size + 3+dst_offset_size;
 970     break;
 971   case Op_VecD: {
 972     calc_size = 3+src_offset_size + 3+dst_offset_size;
 973     int tmp_src_offset = src_offset + 4;
 974     int tmp_dst_offset = dst_offset + 4;
 975     src_offset_size = (tmp_src_offset == 0) ? 0 : ((tmp_src_offset < 0x80) ? 1 : 4);
 976     dst_offset_size = (tmp_dst_offset == 0) ? 0 : ((tmp_dst_offset < 0x80) ? 1 : 4);
 977     calc_size += 3+src_offset_size + 3+dst_offset_size;
 978     break;
 979   }   
 980   case Op_VecX:
 981   case Op_VecY:
 982   case Op_VecZ:
 983     calc_size = 6 + 6 + 5+src_offset_size + 5+dst_offset_size;
 984     break;
 985   default:
 986     ShouldNotReachHere();
 987   }
 988   if (cbuf) {
 989     MacroAssembler _masm(cbuf);
 990     int offset = __ offset();
 991     switch (ireg) {
 992     case Op_VecS:
 993       __ pushl(Address(rsp, src_offset));
 994       __ popl (Address(rsp, dst_offset));
 995       break;
 996     case Op_VecD:
 997       __ pushl(Address(rsp, src_offset));
 998       __ popl (Address(rsp, dst_offset));
 999       __ pushl(Address(rsp, src_offset+4));


1004       __ movdqu(xmm0, Address(rsp, src_offset));
1005       __ movdqu(Address(rsp, dst_offset), xmm0);
1006       __ movdqu(xmm0, Address(rsp, -16));
1007       break;
1008     case Op_VecY:
1009       __ vmovdqu(Address(rsp, -32), xmm0);
1010       __ vmovdqu(xmm0, Address(rsp, src_offset));
1011       __ vmovdqu(Address(rsp, dst_offset), xmm0);
1012       __ vmovdqu(xmm0, Address(rsp, -32));
1013       break;
1014     case Op_VecZ:
1015       __ evmovdqul(Address(rsp, -64), xmm0, 2);
1016       __ evmovdqul(xmm0, Address(rsp, src_offset), 2);
1017       __ evmovdqul(Address(rsp, dst_offset), xmm0, 2);
1018       __ evmovdqul(xmm0, Address(rsp, -64), 2);
1019       break;
1020     default:
1021       ShouldNotReachHere();
1022     }
1023     int size = __ offset() - offset;
1024     assert(size == calc_size, "incorrect size calculation");
1025     return size;
1026 #ifndef PRODUCT
1027   } else if (!do_size) {
1028     switch (ireg) {
1029     case Op_VecS:
1030       st->print("pushl   [rsp + #%d]\t# 32-bit mem-mem spill\n\t"
1031                 "popl    [rsp + #%d]",
1032                 src_offset, dst_offset);
1033       break;
1034     case Op_VecD:
1035       st->print("pushl   [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
1036                 "popq    [rsp + #%d]\n\t"
1037                 "pushl   [rsp + #%d]\n\t"
1038                 "popq    [rsp + #%d]",
1039                 src_offset, dst_offset, src_offset+4, dst_offset+4);
1040       break;
1041      case Op_VecX:
1042       st->print("movdqu  [rsp - #16], xmm0\t# 128-bit mem-mem spill\n\t"
1043                 "movdqu  xmm0, [rsp + #%d]\n\t"
1044                 "movdqu  [rsp + #%d], xmm0\n\t"

